From 0627d93d2f1e77e827447407db6a1f646ab5ed43 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 6 Sep 2022 14:33:52 +0200 Subject: [PATCH 001/104] Introduce mux and port the provider to v6. --- go.mod | 5 +++-- go.sum | 6 ++++-- main.go | 52 ++++++++++++++++++++++++++++++++++++++++++++-------- 3 files changed, 51 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 64cf4a435..9a9c09d4a 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,8 @@ require ( github.com/elastic/cloud-sdk-go v1.10.0 github.com/go-openapi/runtime v0.24.1 github.com/go-openapi/strfmt v0.21.3 + github.com/hashicorp/terraform-plugin-go v0.14.0 + github.com/hashicorp/terraform-plugin-mux v0.7.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.21.0 github.com/stretchr/testify v1.8.0 ) @@ -44,7 +46,6 @@ require ( github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.17.2 // indirect github.com/hashicorp/terraform-json v0.14.0 // indirect - github.com/hashicorp/terraform-plugin-go v0.14.0 // indirect github.com/hashicorp/terraform-plugin-log v0.7.0 // indirect github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c // indirect github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect @@ -72,7 +73,7 @@ require ( golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/appengine v1.6.6 // indirect - google.golang.org/genproto v0.0.0-20200711021454-869866162049 // indirect + google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d // indirect google.golang.org/grpc v1.48.0 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 67adeac85..097a6d198 100644 --- a/go.sum +++ b/go.sum @@ -287,6 +287,8 @@ github.com/hashicorp/terraform-plugin-go v0.14.0 h1:ttnSlS8bz3ZPYbMb84DpcPhY4F5D github.com/hashicorp/terraform-plugin-go v0.14.0/go.mod 
h1:2nNCBeRLaenyQEi78xrGrs9hMbulveqG/zDMQSvVJTE= github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs= github.com/hashicorp/terraform-plugin-log v0.7.0/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= +github.com/hashicorp/terraform-plugin-mux v0.7.0 h1:wRbSYzg+v2sn5Mdee0UKm4YTt4wJG0LfSwtgNuBkglY= +github.com/hashicorp/terraform-plugin-mux v0.7.0/go.mod h1:Ae30Mc5lz4d1awtiCbHP0YyvgBeiQ00Q1nAq0U3lb+I= github.com/hashicorp/terraform-plugin-sdk/v2 v2.21.0 h1:eIJjFlI4k6BMso6Wq/bq56U0RukXc4JbwJJ8Oze2/tg= github.com/hashicorp/terraform-plugin-sdk/v2 v2.21.0/go.mod h1:mYPs/uchNcBq7AclQv9QUtSf9iNcfp1Ag21jqTlDf2M= github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c h1:D8aRO6+mTqHfLsK/BC3j5OAoogv1WLRWzY1AaTo3rBg= @@ -577,8 +579,8 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200711021454-869866162049 h1:YFTFpQhgvrLrmxtiIncJxFXeCyq84ixuKWVCaCAi9Oc= -google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d h1:92D1fum1bJLKSdr11OJ+54YeCMCGYIygTA7R/YZxH5M= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= diff --git a/main.go b/main.go index 
0744ef73c..29889ca0c 100644 --- a/main.go +++ b/main.go @@ -18,10 +18,15 @@ package main import ( + "context" "flag" + "log" "github.com/elastic/terraform-provider-ec/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server" + "github.com/hashicorp/terraform-plugin-mux/tf5to6server" + "github.com/hashicorp/terraform-plugin-mux/tf6muxserver" ) //go:generate go run ./gen/gen.go @@ -30,13 +35,44 @@ import ( const ProviderAddr = "registry.terraform.io/elastic/ec" func main() { - var debugMode bool - flag.BoolVar(&debugMode, "debug", false, "set to true to run the provider with support for debuggers like delve") + debugFlag := flag.Bool("debug", false, "set to true to run the provider with support for debuggers like delve") flag.Parse() - plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: ec.Provider, - Debug: debugMode, - ProviderAddr: ProviderAddr, - }) + upgradedSdkProvider, err := tf5to6server.UpgradeServer( + context.Background(), + ec.Provider().GRPCProvider, + ) + + if err != nil { + log.Fatal(err) + } + + ctx := context.Background() + providers := []func() tfprotov6.ProviderServer{ + func() tfprotov6.ProviderServer { return upgradedSdkProvider }, + // TODO + // add new v6 provider with `ec_deployment` resource + } + + muxServer, err := tf6muxserver.NewMuxServer(ctx, providers...) 
+ + if err != nil { + log.Fatal(err) + } + + var serveOpts []tf6server.ServeOpt + + if *debugFlag { + serveOpts = append(serveOpts, tf6server.WithManagedDebug()) + } + + err = tf6server.Serve( + ProviderAddr, + muxServer.ProviderServer, + serveOpts..., + ) + + if err != nil { + log.Fatal(err) + } } From 2213f865ebfde3f9ca20090ae190700997d59980 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 6 Sep 2022 15:25:10 +0200 Subject: [PATCH 002/104] Update required version of CLI in examples --- examples/deployment/deployment.tf | 2 +- examples/deployment_ccs/deployment.tf | 2 +- examples/deployment_with_init/provider.tf | 2 +- examples/extension_bundle/extension.tf | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/deployment/deployment.tf b/examples/deployment/deployment.tf index bba09d5d8..c0f5864be 100644 --- a/examples/deployment/deployment.tf +++ b/examples/deployment/deployment.tf @@ -1,7 +1,7 @@ terraform { # The Elastic Cloud provider is supported from ">=0.12" # Version later than 0.12.29 is required for this terraform block to work. 
- required_version = ">= 0.12.29" + required_version = ">= 1.0" required_providers { ec = { diff --git a/examples/deployment_ccs/deployment.tf b/examples/deployment_ccs/deployment.tf index a6eedabde..7efd01323 100644 --- a/examples/deployment_ccs/deployment.tf +++ b/examples/deployment_ccs/deployment.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 0.12.29" + required_version = ">= 1.0" required_providers { ec = { diff --git a/examples/deployment_with_init/provider.tf b/examples/deployment_with_init/provider.tf index 0b41701ac..c4282ff46 100644 --- a/examples/deployment_with_init/provider.tf +++ b/examples/deployment_with_init/provider.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 0.12.29" + required_version = ">= 1.0" required_providers { ec = { diff --git a/examples/extension_bundle/extension.tf b/examples/extension_bundle/extension.tf index 150bec25f..e1f1f903a 100644 --- a/examples/extension_bundle/extension.tf +++ b/examples/extension_bundle/extension.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 0.12.29" + required_version = ">= 1.0" required_providers { ec = { From cf373dbb03c164fd7250bbe3c2763f5196e55c87 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 6 Sep 2022 15:31:50 +0200 Subject: [PATCH 003/104] Increase TF verson in github workflow --- .github/workflows/go.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index cbf806aad..76d776287 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -21,7 +21,7 @@ jobs: - name: Install terraform uses: hashicorp/setup-terraform@v2 with: - terraform_version: "0.14.x" + terraform_version: "1.x.x" - name: Cache Go Modules uses: actions/cache@v3 From 2194ad39245fdc4fc9e57743f2aab3ef626f7ab4 Mon Sep 17 00:00:00 2001 From: Pascal Hofmann Date: Wed, 31 Aug 2022 06:18:38 +0200 Subject: [PATCH 004/104] Migrate data sources to terraform-provider-framework --- ec/acc/acc_prereq.go | 26 +- 
ec/acc/datasource_deployment_basic_test.go | 6 +- ec/acc/datasource_stack_test.go | 8 +- ec/acc/datasource_tags_test.go | 6 +- ec/acc/deployment_autoscaling_test.go | 6 +- ec/acc/deployment_basic_defaults_test.go | 12 +- ec/acc/deployment_basic_tags_test.go | 6 +- ec/acc/deployment_basic_test.go | 12 +- ec/acc/deployment_ccs_test.go | 6 +- ec/acc/deployment_compute_optimized_test.go | 6 +- ec/acc/deployment_dedicated_test.go | 12 +- .../deployment_docker_image_override_test.go | 6 +- .../deployment_elasticsearch_kesytore_test.go | 4 +- ec/acc/deployment_emptyconf_test.go | 6 +- ec/acc/deployment_enterprise_search_test.go | 6 +- ec/acc/deployment_extension_basic_test.go | 6 +- .../deployment_extension_bundle_file_test.go | 6 +- ...ployment_extension_plugin_download_test.go | 6 +- .../deployment_failed_upgrade_retry_test.go | 6 +- ec/acc/deployment_hotwarm_test.go | 6 +- ec/acc/deployment_integrations_server_test.go | 6 +- ec/acc/deployment_memory_optimized_test.go | 6 +- ec/acc/deployment_observability_self_test.go | 6 +- ec/acc/deployment_observability_test.go | 6 +- ec/acc/deployment_observability_tpl_test.go | 6 +- .../deployment_post_node_role_upgrade_test.go | 6 +- ...deployment_pre_node_role_migration_test.go | 6 +- ec/acc/deployment_security_test.go | 6 +- ec/acc/deployment_snapshot_test.go | 6 +- ...loyment_traffic_filter_association_test.go | 6 +- ec/acc/deployment_traffic_filter_test.go | 12 +- .../deployment_with_extension_bundle_test.go | 4 +- .../deploymentdatasource/datasource.go | 146 ++++----- .../deploymentdatasource/datasource_test.go | 202 +++++++++---- .../deploymentdatasource/flatteners_apm.go | 69 +++-- .../flatteners_apm_test.go | 53 ++-- .../flatteners_elasticsearch.go | 128 ++++---- .../flatteners_elasticsearch_test.go | 86 ++++-- .../flatteners_enterprise_search.go | 82 +++-- .../flatteners_enterprise_search_test.go | 57 ++-- .../flatteners_integrations_server.go | 69 +++-- .../flatteners_integrations_server_test.go | 93 +++--- 
.../deploymentdatasource/flatteners_kibana.go | 74 +++-- .../flatteners_kibana_test.go | 50 ++-- .../flatteners_observability.go | 47 ++- .../flatteners_observability_test.go | 38 ++- .../deploymentdatasource/flatteners_tags.go | 14 +- .../flatteners_tags_test.go | 13 +- .../flatteners_traffic_filter.go | 17 +- .../flatteners_traffic_filter_test.go | 14 +- .../deploymentdatasource/schema.go | 141 ++++----- .../deploymentdatasource/schema_apm.go | 116 ++++--- .../schema_elasticsearch.go | 214 ++++++------- .../schema_enterprise_search.go | 134 ++++----- .../schema_integrations_server.go | 116 ++++--- .../deploymentdatasource/schema_kibana.go | 116 ++++--- .../schema_observability.go | 50 ++++ .../deploymentsdatasource/datasource.go | 125 ++++---- .../deploymentsdatasource/datasource_test.go | 137 ++++----- .../deploymentsdatasource/expanders.go | 82 +++-- .../deploymentsdatasource/expanders_test.go | 208 +++++++------ .../deploymentsdatasource/schema.go | 264 ++++++++-------- ec/ecdatasource/stackdatasource/datasource.go | 157 +++++----- .../stackdatasource/datasource_test.go | 183 +++++++----- .../stackdatasource/flatteners_apm.go | 43 ++- .../stackdatasource/flatteners_apm_test.go | 27 +- .../flatteners_elasticsearch.go | 49 +-- .../flatteners_elasticsearch_test.go | 33 +- .../flatteners_enterprise_search.go | 43 ++- .../flatteners_enterprise_search_test.go | 27 +- .../stackdatasource/flatteners_kibana.go | 43 ++- .../stackdatasource/flatteners_kibana_test.go | 27 +- ec/ecdatasource/stackdatasource/schema.go | 173 ++++++----- ec/internal/planmodifier/default_from_env.go | 45 +++ ec/internal/planmodifier/default_value.go | 45 +++ ec/internal/provider.go | 29 ++ ec/internal/util/helpers.go | 66 ++++ ec/internal/validators/knownvalidator.go | 40 +++ ec/internal/validators/urlvalidator.go | 83 ++++++ ec/provider.go | 282 +++++++++++++++--- ec/provider_config.go | 58 ++-- ec/provider_config_test.go | 2 +- ec/version.go | 2 +- go.mod | 50 ++-- go.sum | 166 ++++------- 
main.go | 8 +- 86 files changed, 2879 insertions(+), 1991 deletions(-) create mode 100644 ec/ecdatasource/deploymentdatasource/schema_observability.go create mode 100644 ec/internal/planmodifier/default_from_env.go create mode 100644 ec/internal/planmodifier/default_value.go create mode 100644 ec/internal/provider.go create mode 100644 ec/internal/validators/knownvalidator.go create mode 100644 ec/internal/validators/urlvalidator.go diff --git a/ec/acc/acc_prereq.go b/ec/acc/acc_prereq.go index f481276ae..9e77eea24 100644 --- a/ec/acc/acc_prereq.go +++ b/ec/acc/acc_prereq.go @@ -18,6 +18,10 @@ package acc import ( + "context" + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-mux/tf5muxserver" "net/http" "os" "testing" @@ -25,19 +29,27 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/auth" "github.com/elastic/terraform-provider-ec/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) const ( prefix = "terraform_acc_" ) -var testAccProviderFactory = map[string]func() (*schema.Provider, error){ - "ec": providerFactory, -} - -func providerFactory() (*schema.Provider, error) { - return ec.Provider(), nil +var testAccProviderFactory = protoV5ProviderFactories() + +func protoV5ProviderFactories() map[string]func() (tfprotov5.ProviderServer, error) { + return map[string]func() (tfprotov5.ProviderServer, error){ + "ec": func() (tfprotov5.ProviderServer, error) { + return tf5muxserver.NewMuxServer(context.Background(), + func() tfprotov5.ProviderServer { + return ec.LegacyProvider().GRPCProvider() + }, + func() tfprotov5.ProviderServer { + return providerserver.NewProtocol5(ec.New())() + }, + ) + }, + } } func testAccPreCheck(t *testing.T) { diff --git a/ec/acc/datasource_deployment_basic_test.go b/ec/acc/datasource_deployment_basic_test.go index 17d1c149b..556b87570 100644 --- 
a/ec/acc/datasource_deployment_basic_test.go +++ b/ec/acc/datasource_deployment_basic_test.go @@ -38,9 +38,9 @@ func TestAccDatasourceDeployment_basic(t *testing.T) { var namePrefix = secondRandomName[:22] resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfg, diff --git a/ec/acc/datasource_stack_test.go b/ec/acc/datasource_stack_test.go index 69f204b5e..e09613574 100644 --- a/ec/acc/datasource_stack_test.go +++ b/ec/acc/datasource_stack_test.go @@ -31,8 +31,8 @@ func TestAccDatasourceStack_latest(t *testing.T) { cfg := fixtureAccStackDataSource(t, depCfg, getRegion()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, Steps: []resource.TestStep{ { Config: cfg, @@ -53,8 +53,8 @@ func TestAccDatasourceStack_regex(t *testing.T) { cfg := fixtureAccStackDataSource(t, depCfg, getRegion()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, Steps: []resource.TestStep{ { Config: cfg, diff --git a/ec/acc/datasource_tags_test.go b/ec/acc/datasource_tags_test.go index ca5417c42..3a653d832 100644 --- a/ec/acc/datasource_tags_test.go +++ b/ec/acc/datasource_tags_test.go @@ -43,9 +43,9 @@ func TestAccDatasource_basic_tags(t *testing.T) { cfg := fixtureAccTagsDataSource(t, depCfg, randomName, getRegion(), defaultTemplate, testID) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: 
testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfg, diff --git a/ec/acc/deployment_autoscaling_test.go b/ec/acc/deployment_autoscaling_test.go index c92810dec..84b8d8f86 100644 --- a/ec/acc/deployment_autoscaling_test.go +++ b/ec/acc/deployment_autoscaling_test.go @@ -37,9 +37,9 @@ func TestAccDeployment_autoscaling(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfgF(startCfg), diff --git a/ec/acc/deployment_basic_defaults_test.go b/ec/acc/deployment_basic_defaults_test.go index 1bb66014b..9de4f7e2c 100644 --- a/ec/acc/deployment_basic_defaults_test.go +++ b/ec/acc/deployment_basic_defaults_test.go @@ -42,9 +42,9 @@ func TestAccDeployment_basic_defaults(t *testing.T) { thirdConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, thirdCfg, randomName, getRegion(), defaultTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfg, @@ -161,9 +161,9 @@ func TestAccDeployment_basic_defaults_hw(t *testing.T) { hotWarmCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, randomName, getRegion(), hotWarmTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: 
testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfg, diff --git a/ec/acc/deployment_basic_tags_test.go b/ec/acc/deployment_basic_tags_test.go index d1bf1a6a0..80fbe6caa 100644 --- a/ec/acc/deployment_basic_tags_test.go +++ b/ec/acc/deployment_basic_tags_test.go @@ -42,9 +42,9 @@ func TestAccDeployment_basic_tags(t *testing.T) { fourthConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, fourthCfg, randomName, getRegion(), defaultTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a deployment with tags. diff --git a/ec/acc/deployment_basic_test.go b/ec/acc/deployment_basic_test.go index ec331e744..80987c676 100644 --- a/ec/acc/deployment_basic_test.go +++ b/ec/acc/deployment_basic_test.go @@ -42,9 +42,9 @@ func TestAccDeployment_basic_tf(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfg, @@ -98,9 +98,9 @@ func TestAccDeployment_basic_config(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { 
Config: settingsConfigCfg, diff --git a/ec/acc/deployment_ccs_test.go b/ec/acc/deployment_ccs_test.go index 5845b7b56..16ff4f660 100644 --- a/ec/acc/deployment_ccs_test.go +++ b/ec/acc/deployment_ccs_test.go @@ -43,9 +43,9 @@ func TestAccDeployment_ccs(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, ccsRandomName, getRegion(), ccsTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a CCS deployment with the default settings. diff --git a/ec/acc/deployment_compute_optimized_test.go b/ec/acc/deployment_compute_optimized_test.go index a1c7c322b..6313e6633 100644 --- a/ec/acc/deployment_compute_optimized_test.go +++ b/ec/acc/deployment_compute_optimized_test.go @@ -33,9 +33,9 @@ func TestAccDeployment_computeOptimized(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, randomName, getRegion(), computeOpTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a Compute Optimized deployment with the default settings. 
diff --git a/ec/acc/deployment_dedicated_test.go b/ec/acc/deployment_dedicated_test.go index 50d11f2bd..202caf5f4 100644 --- a/ec/acc/deployment_dedicated_test.go +++ b/ec/acc/deployment_dedicated_test.go @@ -31,9 +31,9 @@ func TestAccDeployment_dedicated_coordinating(t *testing.T) { cfg := fixtureAccDeploymentResourceBasicDefaults(t, startCfg, randomName, getRegion(), hotWarmTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a deployment with dedicated coordinating. @@ -78,9 +78,9 @@ func TestAccDeployment_dedicated_master(t *testing.T) { cfg := fixtureAccDeploymentResourceBasicDefaults(t, startCfg, randomName, getRegion(), hotWarmTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a deployment with dedicated master nodes. 
diff --git a/ec/acc/deployment_docker_image_override_test.go b/ec/acc/deployment_docker_image_override_test.go index f748a221a..d977774f9 100644 --- a/ec/acc/deployment_docker_image_override_test.go +++ b/ec/acc/deployment_docker_image_override_test.go @@ -44,9 +44,9 @@ func TestAccDeployment_docker_image_override(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfgF("testdata/deployment_docker_image_override.tf"), diff --git a/ec/acc/deployment_elasticsearch_kesytore_test.go b/ec/acc/deployment_elasticsearch_kesytore_test.go index 5870bc2a6..f6d557daa 100644 --- a/ec/acc/deployment_elasticsearch_kesytore_test.go +++ b/ec/acc/deployment_elasticsearch_kesytore_test.go @@ -46,8 +46,8 @@ func TestAccDeploymentElasticsearchKeystore_full(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, CheckDestroy: resource.ComposeAggregateTestCheckFunc( testAccDeploymentDestroy, testAccDeploymentElasticsearchKeystoreDestroy, diff --git a/ec/acc/deployment_emptyconf_test.go b/ec/acc/deployment_emptyconf_test.go index 2881fb507..96126adca 100644 --- a/ec/acc/deployment_emptyconf_test.go +++ b/ec/acc/deployment_emptyconf_test.go @@ -36,9 +36,9 @@ func TestAccDeployment_emptyconfig(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: 
testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfgF(startCfg), diff --git a/ec/acc/deployment_enterprise_search_test.go b/ec/acc/deployment_enterprise_search_test.go index b5364a9c9..51a3b4b96 100644 --- a/ec/acc/deployment_enterprise_search_test.go +++ b/ec/acc/deployment_enterprise_search_test.go @@ -33,9 +33,9 @@ func TestAccDeployment_enterpriseSearch(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, randomName, getRegion(), enterpriseSearchTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create an Enterprise Search deployment with the default settings. diff --git a/ec/acc/deployment_extension_basic_test.go b/ec/acc/deployment_extension_basic_test.go index 0a754bcfd..8395880f2 100644 --- a/ec/acc/deployment_extension_basic_test.go +++ b/ec/acc/deployment_extension_basic_test.go @@ -34,9 +34,9 @@ func TestAccDeploymentExtension_basic(t *testing.T) { cfg2 := fixtureAccExtensionBasicWithTF(t, "testdata/extension_basic.tf", randomName, "updated desc") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccExtensionDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccExtensionDestroy, Steps: []resource.TestStep{ { Config: cfg, diff --git a/ec/acc/deployment_extension_bundle_file_test.go b/ec/acc/deployment_extension_bundle_file_test.go index c4bcbeffb..8c7a267f3 100644 --- a/ec/acc/deployment_extension_bundle_file_test.go +++ b/ec/acc/deployment_extension_bundle_file_test.go @@ -43,9 +43,9 @@ func TestAccDeploymentExtension_bundleFile(t 
*testing.T) { cfg := fixtureAccExtensionBundleWithTF(t, "testdata/extension_bundle_file.tf", filePath, randomName, "desc") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccExtensionDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccExtensionDestroy, Steps: []resource.TestStep{ { PreConfig: func() { writeFile(t, filePath, "extension.txt", "foo") }, diff --git a/ec/acc/deployment_extension_plugin_download_test.go b/ec/acc/deployment_extension_plugin_download_test.go index dc3fdd5ea..807838842 100644 --- a/ec/acc/deployment_extension_plugin_download_test.go +++ b/ec/acc/deployment_extension_plugin_download_test.go @@ -34,9 +34,9 @@ func TestAccDeploymentExtension_pluginDownload(t *testing.T) { cfg := fixtureAccExtensionBundleDownloadWithTF(t, "testdata/extension_plugin_download.tf", randomName, downloadURL) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccExtensionDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccExtensionDestroy, Steps: []resource.TestStep{ { Config: cfg, diff --git a/ec/acc/deployment_failed_upgrade_retry_test.go b/ec/acc/deployment_failed_upgrade_retry_test.go index 36c3f25c8..df098b665 100644 --- a/ec/acc/deployment_failed_upgrade_retry_test.go +++ b/ec/acc/deployment_failed_upgrade_retry_test.go @@ -32,9 +32,9 @@ func TestAccDeployment_failed_upgrade_retry(t *testing.T) { var esCreds creds resName := "ec_deployment.upgrade_retry" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: 
testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: fixtureDeploymentDefaults(t, "testdata/deployment_upgrade_retry_1.tf"), diff --git a/ec/acc/deployment_hotwarm_test.go b/ec/acc/deployment_hotwarm_test.go index 5a800a2b3..82f3d81ad 100644 --- a/ec/acc/deployment_hotwarm_test.go +++ b/ec/acc/deployment_hotwarm_test.go @@ -38,9 +38,9 @@ func TestAccDeployment_hotwarm(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasic(t, secondCfg, randomName, getRegion(), hotWarmTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a Hot / Warm deployment with the default settings. diff --git a/ec/acc/deployment_integrations_server_test.go b/ec/acc/deployment_integrations_server_test.go index 2bc714aba..34f7a4f55 100644 --- a/ec/acc/deployment_integrations_server_test.go +++ b/ec/acc/deployment_integrations_server_test.go @@ -33,9 +33,9 @@ func TestAccDeployment_integrationsServer(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, randomName, getRegion(), defaultTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create an Integrations Server deployment with the default settings. 
diff --git a/ec/acc/deployment_memory_optimized_test.go b/ec/acc/deployment_memory_optimized_test.go index 5c58756f7..bb3961cef 100644 --- a/ec/acc/deployment_memory_optimized_test.go +++ b/ec/acc/deployment_memory_optimized_test.go @@ -33,9 +33,9 @@ func TestAccDeployment_memoryOptimized(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, randomName, getRegion(), memoryOpTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a Memory Optimized deployment with the default settings. diff --git a/ec/acc/deployment_observability_self_test.go b/ec/acc/deployment_observability_self_test.go index 375a2e83a..f02ad5e9b 100644 --- a/ec/acc/deployment_observability_self_test.go +++ b/ec/acc/deployment_observability_self_test.go @@ -33,9 +33,9 @@ func TestAccDeployment_observability_createWithSelfObservability(t *testing.T) { config := fixtureAccDeploymentResourceSelfObs(t, configFile, randomName, getRegion(), defaultTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a deployment with observability-target 'self' diff --git a/ec/acc/deployment_observability_test.go b/ec/acc/deployment_observability_test.go index 0a23bc1de..050ad7ada 100644 --- a/ec/acc/deployment_observability_test.go +++ b/ec/acc/deployment_observability_test.go @@ -40,9 +40,9 @@ func TestAccDeployment_observability(t *testing.T) { fourthCfg := 
fixtureAccDeploymentResourceBasicObs(t, removeObsCfg, randomName, getRegion(), defaultTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfg, diff --git a/ec/acc/deployment_observability_tpl_test.go b/ec/acc/deployment_observability_tpl_test.go index 5d8d7da77..a5d343afb 100644 --- a/ec/acc/deployment_observability_tpl_test.go +++ b/ec/acc/deployment_observability_tpl_test.go @@ -33,9 +33,9 @@ func TestAccDeployment_observabilityTpl(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, randomName, getRegion(), observabilityTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create an Observability deployment with the default settings. 
diff --git a/ec/acc/deployment_post_node_role_upgrade_test.go b/ec/acc/deployment_post_node_role_upgrade_test.go index 0869cdda9..f60912224 100644 --- a/ec/acc/deployment_post_node_role_upgrade_test.go +++ b/ec/acc/deployment_post_node_role_upgrade_test.go @@ -37,9 +37,9 @@ func TestAccDeployment_post_node_roles(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfgF(startCfg), diff --git a/ec/acc/deployment_pre_node_role_migration_test.go b/ec/acc/deployment_pre_node_role_migration_test.go index f20cd68ee..be045253f 100644 --- a/ec/acc/deployment_pre_node_role_migration_test.go +++ b/ec/acc/deployment_pre_node_role_migration_test.go @@ -38,9 +38,9 @@ func TestAccDeployment_pre_node_roles(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfgF(startCfg), diff --git a/ec/acc/deployment_security_test.go b/ec/acc/deployment_security_test.go index 5535ad6e1..2fc3ea281 100644 --- a/ec/acc/deployment_security_test.go +++ b/ec/acc/deployment_security_test.go @@ -33,9 +33,9 @@ func TestAccDeployment_security(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, randomName, getRegion(), securityTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, 
+ ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a Security deployment with the default settings. diff --git a/ec/acc/deployment_snapshot_test.go b/ec/acc/deployment_snapshot_test.go index 739a96edb..0fb391521 100644 --- a/ec/acc/deployment_snapshot_test.go +++ b/ec/acc/deployment_snapshot_test.go @@ -38,9 +38,9 @@ func TestAccDeployment_snapshot_restore(t *testing.T) { t.Skip("skipped due flakiness: https://github.com/elastic/terraform-provider-ec/issues/443") var esCreds creds resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: fixtureDeploymentDefaults(t, "testdata/deployment_snapshot_1.tf"), diff --git a/ec/acc/deployment_traffic_filter_association_test.go b/ec/acc/deployment_traffic_filter_association_test.go index ba61bb3eb..0fee2adc6 100644 --- a/ec/acc/deployment_traffic_filter_association_test.go +++ b/ec/acc/deployment_traffic_filter_association_test.go @@ -38,9 +38,9 @@ func TestAccDeploymentTrafficFilterAssociation_basic(t *testing.T) { updateConfigCfg := fixtureAccDeploymentTrafficFilterResourceAssociationBasic(t, updateCfg, randomNameSecond, getRegion(), defaultTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentTrafficFilterDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentTrafficFilterDestroy, Steps: []resource.TestStep{ { // Expects a non-empty plan since "ec_deployment.traffic_filter" diff --git a/ec/acc/deployment_traffic_filter_test.go 
b/ec/acc/deployment_traffic_filter_test.go index 3a6fc996e..86e792d16 100644 --- a/ec/acc/deployment_traffic_filter_test.go +++ b/ec/acc/deployment_traffic_filter_test.go @@ -38,9 +38,9 @@ func TestAccDeploymentTrafficFilter_basic(t *testing.T) { updateLargeConfigCfg := fixtureAccDeploymentTrafficFilterResourceBasic(t, updateLargeCfg, randomName, getRegion()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentTrafficFilterDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentTrafficFilterDestroy, Steps: []resource.TestStep{ { Config: cfg, @@ -96,9 +96,9 @@ func TestAccDeploymentTrafficFilter_azure(t *testing.T) { cfg := fixtureAccDeploymentTrafficFilterResourceBasic(t, startCfg, randomName, "azure-australiaeast") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentTrafficFilterDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentTrafficFilterDestroy, Steps: []resource.TestStep{ { Config: cfg, diff --git a/ec/acc/deployment_with_extension_bundle_test.go b/ec/acc/deployment_with_extension_bundle_test.go index c05ab8e91..5c632c9b9 100644 --- a/ec/acc/deployment_with_extension_bundle_test.go +++ b/ec/acc/deployment_with_extension_bundle_test.go @@ -43,8 +43,8 @@ func TestAccDeployment_withExtension(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV5ProviderFactories: testAccProviderFactory, CheckDestroy: func(s *terraform.State) error { merr := multierror.NewPrefixed("checking resource with extension") diff --git 
a/ec/ecdatasource/deploymentdatasource/datasource.go b/ec/ecdatasource/deploymentdatasource/datasource.go index 54ddd549f..e940639b9 100644 --- a/ec/ecdatasource/deploymentdatasource/datasource.go +++ b/ec/ecdatasource/deploymentdatasource/datasource.go @@ -19,39 +19,46 @@ package deploymentdatasource import ( "context" - "time" + "fmt" + "github.com/elastic/terraform-provider-ec/ec/internal" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deputil" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) -// DataSource returns the ec_deployment data source schema. 
-func DataSource() *schema.Resource { - return &schema.Resource{ - ReadContext: read, +var _ provider.DataSourceType = (*DataSourceType)(nil) - Schema: newSchema(), +type DataSourceType struct{} - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(5 * time.Minute), - }, - } +func (s DataSourceType) NewDataSource(ctx context.Context, p provider.Provider) (datasource.DataSource, diag.Diagnostics) { + return &deploymentDataSource{ + p: p.(internal.Provider), + }, nil } -func read(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) - deploymentID := d.Get("id").(string) +var _ datasource.DataSource = (*deploymentDataSource)(nil) + +type deploymentDataSource struct { + p internal.Provider +} + +func (d deploymentDataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { + var newState modelV0 + response.Diagnostics.Append(request.Config.Get(ctx, &newState)...) + if response.Diagnostics.HasError() { + return + } res, err := deploymentapi.Get(deploymentapi.GetParams{ - API: client, - DeploymentID: deploymentID, + API: d.p.GetClient(), + DeploymentID: newState.ID.Value, QueryParams: deputil.QueryParams{ ShowPlans: true, ShowSettings: true, @@ -60,92 +67,55 @@ func read(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diag }, }) if err != nil { - return diag.FromErr( - multierror.NewPrefixed("failed retrieving deployment information", err), + response.Diagnostics.AddError( + "Failed retrieving deployment information", + fmt.Sprintf("Failed retrieving deployment information: %s", err), ) + return } - d.SetId(deploymentID) - - if err := modelToState(d, res); err != nil { - return diag.FromErr(err) + response.Diagnostics.Append(modelToState(ctx, res, &newState)...) + if response.Diagnostics.HasError() { + return } - return nil + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) 
} -func modelToState(d *schema.ResourceData, res *models.DeploymentGetResponse) error { - if err := d.Set("name", res.Name); err != nil { - return err - } +/* + TODO - see https://github.com/multani/terraform-provider-camunda/pull/16/files - if err := d.Set("healthy", res.Healthy); err != nil { - return err - } + Timeouts: &schema.ResourceTimeout{ + Default: schema.DefaultTimeout(5 * time.Minute), + }, +*/ - if err := d.Set("alias", res.Alias); err != nil { - return err - } +func modelToState(ctx context.Context, res *models.DeploymentGetResponse, state *modelV0) diag.Diagnostics { + var diags diag.Diagnostics + + state.Name = types.String{Value: *res.Name} + state.Healthy = types.Bool{Value: *res.Healthy} + state.Alias = types.String{Value: res.Alias} es := res.Resources.Elasticsearch[0] if es.Region != nil { - if err := d.Set("region", *es.Region); err != nil { - return err - } + state.Region = types.String{Value: *es.Region} } if !util.IsCurrentEsPlanEmpty(es) { - if err := d.Set("deployment_template_id", - *es.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID); err != nil { - return err - } + state.DeploymentTemplateID = types.String{Value: *es.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID} } - if settings := flattenTrafficFiltering(res.Settings); settings != nil { - if err := d.Set("traffic_filter", settings); err != nil { - return err - } - } - - if observability := flattenObservability(res.Settings); len(observability) > 0 { - if err := d.Set("observability", observability); err != nil { - return err - } - } + diags.Append(flattenTrafficFiltering(ctx, res.Settings, &state.TrafficFilter)...) + diags.Append(flattenObservability(ctx, res.Settings, &state.Observability)...) + diags.Append(flattenElasticsearchResources(ctx, res.Resources.Elasticsearch, &state.Elasticsearch)...) + diags.Append(flattenKibanaResources(ctx, res.Resources.Kibana, &state.Kibana)...) + diags.Append(flattenApmResources(ctx, res.Resources.Apm, &state.Apm)...) 
+ diags.Append(flattenIntegrationsServerResources(ctx, res.Resources.IntegrationsServer, &state.IntegrationsServer)...) + diags.Append(flattenEnterpriseSearchResources(ctx, res.Resources.EnterpriseSearch, &state.EnterpriseSearch)...) - elasticsearchFlattened, err := flattenElasticsearchResources(res.Resources.Elasticsearch) - if err != nil { - return err - } - if err := d.Set("elasticsearch", elasticsearchFlattened); err != nil { - return err - } - - kibanaFlattened := flattenKibanaResources(res.Resources.Kibana) - if err := d.Set("kibana", kibanaFlattened); err != nil { - return err - } - - apmFlattened := flattenApmResources(res.Resources.Apm) - if err := d.Set("apm", apmFlattened); err != nil { - return err - } - - integrationsServerFlattened := flattenIntegrationsServerResources(res.Resources.IntegrationsServer) - if err := d.Set("integrations_server", integrationsServerFlattened); err != nil { - return err - } - - enterpriseSearchFlattened := flattenEnterpriseSearchResources(res.Resources.EnterpriseSearch) - if err := d.Set("enterprise_search", enterpriseSearchFlattened); err != nil { - return err - } - - if tagsFlattened := flattenTags(res.Metadata); tagsFlattened != nil { - if err := d.Set("tags", tagsFlattened); err != nil { - return err - } - } + state.Tags = flattenTags(res.Metadata) - return nil + return diags } diff --git a/ec/ecdatasource/deploymentdatasource/datasource_test.go b/ec/ecdatasource/deploymentdatasource/datasource_test.go index 74fed579c..2126b708d 100644 --- a/ec/ecdatasource/deploymentdatasource/datasource_test.go +++ b/ec/ecdatasource/deploymentdatasource/datasource_test.go @@ -18,42 +18,33 @@ package deploymentdatasource import ( + "context" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" "testing" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" 
"github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_modelToState(t *testing.T) { - deploymentSchemaArg := schema.TestResourceDataRaw(t, newSchema(), nil) - deploymentSchemaArg.SetId(mock.ValidClusterID) - - wantDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleDeployment(), - Schema: newSchema(), - }) - + wantDeployment := newSampleDeployment() type args struct { - d *schema.ResourceData res *models.DeploymentGetResponse } tests := []struct { name string args args - want *schema.ResourceData + want modelV0 err error }{ { name: "flattens deployment resources", want: wantDeployment, args: args{ - d: deploymentSchemaArg, res: &models.DeploymentGetResponse{ Alias: "some-alias", ID: &mock.ValidClusterID, @@ -135,54 +126,155 @@ func Test_modelToState(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := modelToState(tt.args.d, tt.args.res) + model := modelV0{ + ID: types.String{Value: mock.ValidClusterID}, + } + diags := modelToState(context.Background(), tt.args.res, &model) if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) + assert.Equal(t, diags, tt.err) } else { - assert.NoError(t, err) + assert.Empty(t, diags) } - assert.Equal(t, tt.want.State().Attributes, tt.args.d.State().Attributes) + assert.Equal(t, tt.want, model) }) } } -func newSampleDeployment() map[string]interface{} { - return map[string]interface{}{ - "id": mock.ValidClusterID, - "name": "my_deployment_name", - "alias": "some-alias", - "deployment_template_id": "aws-io-optimized", - "healthy": true, - "region": "us-east-1", - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - "observability": []interface{}{newObservabilitySample()}, - "elasticsearch": []interface{}{map[string]interface{}{ - "healthy": true, - }}, - 
"kibana": []interface{}{map[string]interface{}{ - "healthy": true, - }}, - "apm": []interface{}{map[string]interface{}{ - "healthy": true, - }}, - "integrations_server": []interface{}{map[string]interface{}{ - "healthy": true, - }}, - "enterprise_search": []interface{}{map[string]interface{}{ - "healthy": true, - }}, - "tags": map[string]interface{}{ - "foo": "bar", +func newSampleDeployment() modelV0 { + return modelV0{ + ID: types.String{Value: mock.ValidClusterID}, + Name: types.String{Value: "my_deployment_name"}, + Alias: types.String{Value: "some-alias"}, + DeploymentTemplateID: types.String{Value: "aws-io-optimized"}, + Healthy: types.Bool{Value: true}, + Region: types.String{Value: "us-east-1"}, + TrafficFilter: util.StringListAsType([]string{"0.0.0.0/0", "192.168.10.0/24"}), + Observability: types.List{ + ElemType: types.ObjectType{AttrTypes: observabilitySettingsAttrTypes()}, + Elems: []attr.Value{ + types.Object{ + AttrTypes: observabilitySettingsAttrTypes(), + Attrs: map[string]attr.Value{ + "deployment_id": types.String{Value: mock.ValidClusterID}, + "ref_id": types.String{Value: "main-elasticsearch"}, + "logs": types.Bool{Value: true}, + "metrics": types.Bool{Value: true}, + }, + }, + }, }, - } -} - -func newObservabilitySample() map[string]interface{} { - return map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "logs": true, - "metrics": true, + Elasticsearch: types.List{ + ElemType: types.ObjectType{AttrTypes: elasticsearchResourceInfoAttrTypes()}, + Elems: []attr.Value{ + types.Object{ + AttrTypes: elasticsearchResourceInfoAttrTypes(), + Attrs: map[string]attr.Value{ + "cloud_id": types.String{Value: ""}, + "healthy": types.Bool{Value: true}, + "autoscale": types.String{Value: ""}, + "http_endpoint": types.String{Value: ""}, + "https_endpoint": types.String{Value: ""}, + "ref_id": types.String{Value: ""}, + "resource_id": types.String{Value: ""}, + "status": types.String{Value: ""}, + "version": 
types.String{Value: ""}, + "topology": types.List{ + ElemType: types.ObjectType{AttrTypes: elasticsearchTopologyAttrTypes()}, + Elems: []attr.Value{}, + }, + }, + }, + }, + }, + Kibana: types.List{ + ElemType: types.ObjectType{AttrTypes: kibanaResourceInfoAttrTypes()}, + Elems: []attr.Value{ + types.Object{ + AttrTypes: kibanaResourceInfoAttrTypes(), + Attrs: map[string]attr.Value{ + "elasticsearch_cluster_ref_id": types.String{Value: ""}, + "healthy": types.Bool{Value: true}, + "http_endpoint": types.String{Value: ""}, + "https_endpoint": types.String{Value: ""}, + "ref_id": types.String{Value: ""}, + "resource_id": types.String{Value: ""}, + "status": types.String{Value: ""}, + "version": types.String{Value: ""}, + "topology": types.List{ + ElemType: types.ObjectType{AttrTypes: kibanaTopologyAttrTypes()}, + Elems: []attr.Value{}, + }, + }, + }, + }, + }, + Apm: types.List{ + ElemType: types.ObjectType{AttrTypes: apmResourceInfoAttrTypes()}, + Elems: []attr.Value{ + types.Object{ + AttrTypes: apmResourceInfoAttrTypes(), + Attrs: map[string]attr.Value{ + "elasticsearch_cluster_ref_id": types.String{Value: ""}, + "healthy": types.Bool{Value: true}, + "http_endpoint": types.String{Value: ""}, + "https_endpoint": types.String{Value: ""}, + "ref_id": types.String{Value: ""}, + "resource_id": types.String{Value: ""}, + "status": types.String{Value: ""}, + "version": types.String{Value: ""}, + "topology": types.List{ + ElemType: types.ObjectType{AttrTypes: apmTopologyAttrTypes()}, + Elems: []attr.Value{}, + }, + }, + }, + }, + }, + IntegrationsServer: types.List{ + ElemType: types.ObjectType{AttrTypes: integrationsServerResourceInfoAttrTypes()}, + Elems: []attr.Value{ + types.Object{ + AttrTypes: integrationsServerResourceInfoAttrTypes(), + Attrs: map[string]attr.Value{ + "elasticsearch_cluster_ref_id": types.String{Value: ""}, + "healthy": types.Bool{Value: true}, + "http_endpoint": types.String{Value: ""}, + "https_endpoint": types.String{Value: ""}, + "ref_id": 
types.String{Value: ""}, + "resource_id": types.String{Value: ""}, + "status": types.String{Value: ""}, + "version": types.String{Value: ""}, + "topology": types.List{ + ElemType: types.ObjectType{AttrTypes: integrationsServerTopologyAttrTypes()}, + Elems: []attr.Value{}, + }, + }, + }, + }, + }, + EnterpriseSearch: types.List{ + ElemType: types.ObjectType{AttrTypes: enterpriseSearchResourceInfoAttrTypes()}, + Elems: []attr.Value{ + types.Object{ + AttrTypes: enterpriseSearchResourceInfoAttrTypes(), + Attrs: map[string]attr.Value{ + "elasticsearch_cluster_ref_id": types.String{Value: ""}, + "healthy": types.Bool{Value: true}, + "http_endpoint": types.String{Value: ""}, + "https_endpoint": types.String{Value: ""}, + "ref_id": types.String{Value: ""}, + "resource_id": types.String{Value: ""}, + "status": types.String{Value: ""}, + "version": types.String{Value: ""}, + "topology": types.List{ + ElemType: types.ObjectType{AttrTypes: enterpriseSearchTopologyAttrTypes()}, + Elems: []attr.Value{}, + }, + }, + }, + }, + }, + Tags: util.StringMapAsType(map[string]string{"foo": "bar"}), } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_apm.go b/ec/ecdatasource/deploymentdatasource/flatteners_apm.go index 1369bd4a9..15ad5aaf9 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_apm.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_apm.go @@ -18,84 +18,107 @@ package deploymentdatasource import ( + "context" "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) // flattenApmResources takes in Apm resource models and returns its // flattened form. 
-func flattenApmResources(in []*models.ApmResourceInfo) []interface{} { - var result = make([]interface{}, 0, len(in)) +func flattenApmResources(ctx context.Context, in []*models.ApmResourceInfo, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + var result = make([]apmResourceModelV0, 0, len(in)) + for _, res := range in { - var m = make(map[string]interface{}) + model := apmResourceModelV0{ + Topology: types.List{ElemType: types.ObjectType{AttrTypes: apmTopologyAttrTypes()}}, + } if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID + model.ElasticsearchClusterRefID = types.String{Value: *res.ElasticsearchClusterRefID} } if res.RefID != nil { - m["ref_id"] = *res.RefID + model.RefID = types.String{Value: *res.RefID} } if res.Info != nil { if res.Info.Healthy != nil { - m["healthy"] = *res.Info.Healthy + model.Healthy = types.Bool{Value: *res.Info.Healthy} } if res.Info.ID != nil { - m["resource_id"] = *res.Info.ID + model.ResourceID = types.String{Value: *res.Info.ID} } if res.Info.Status != nil { - m["status"] = *res.Info.Status + model.Status = types.String{Value: *res.Info.Status} } if !util.IsCurrentApmPlanEmpty(res) { var plan = res.Info.PlanInfo.Current.Plan if plan.Apm != nil { - m["version"] = plan.Apm.Version + model.Version = types.String{Value: plan.Apm.Version} } - m["topology"] = flattenApmTopology(plan) + diags.Append(flattenApmTopology(ctx, plan, &model.Topology)...) 
} if res.Info.Metadata != nil { - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v + endpoints := util.FlattenClusterEndpoint(res.Info.Metadata) + if endpoints != nil { + model.HttpEndpoint = types.String{Value: endpoints["http_endpoint"].(string)} + model.HttpsEndpoint = types.String{Value: endpoints["https_endpoint"].(string)} } } } - result = append(result, m) + result = append(result, model) } - return result + diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: apmResourceInfoAttrTypes(), + }, + }, target)...) + + return diags } -func flattenApmTopology(plan *models.ApmPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) +func flattenApmTopology(ctx context.Context, plan *models.ApmPlan, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + var result = make([]apmTopologyModelV0, 0, len(plan.ClusterTopology)) for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) + var model apmTopologyModelV0 if isApmSizePopulated(topology) && *topology.Size.Value == 0 { continue } - m["instance_configuration_id"] = topology.InstanceConfigurationID + model.InstanceConfigurationID = types.String{Value: topology.InstanceConfigurationID} if isApmSizePopulated(topology) { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource + model.Size = types.String{Value: util.MemoryToState(*topology.Size.Value)} + model.SizeResource = types.String{Value: *topology.Size.Resource} } - m["zone_count"] = topology.ZoneCount + model.ZoneCount = types.Int64{Value: int64(topology.ZoneCount)} - result = append(result, m) + result = append(result, model) } - return result + diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: apmTopologyAttrTypes(), + }, + }, target)...) 
+ + return diags } func isApmSizePopulated(topology *models.ApmTopologyElement) bool { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go index cbcdab473..570d696a3 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go @@ -18,12 +18,15 @@ package deploymentdatasource import ( + "context" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" "testing" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_flattenApmResource(t *testing.T) { @@ -33,12 +36,12 @@ func Test_flattenApmResource(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []apmResourceModelV0 }{ { name: "empty resource list returns empty list", args: args{in: []*models.ApmResourceInfo{}}, - want: []interface{}{}, + want: []apmResourceModelV0{}, }, { name: "parses the apm resource", @@ -85,31 +88,37 @@ func Test_flattenApmResource(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "http_endpoint": "http://apmresource.cloud.elastic.co:9200", - "https_endpoint": "https://apmresource.cloud.elastic.co:9243", - "healthy": true, - "status": "started", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.apm.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), + want: []apmResourceModelV0{{ + ElasticsearchClusterRefID: types.String{Value: "main-elasticsearch"}, + RefID: types.String{Value: "main-apm"}, + ResourceID: types.String{Value: 
mock.ValidClusterID}, + Version: types.String{Value: "7.7.0"}, + HttpEndpoint: types.String{Value: "http://apmresource.cloud.elastic.co:9200"}, + HttpsEndpoint: types.String{Value: "https://apmresource.cloud.elastic.co:9243"}, + Healthy: types.Bool{Value: true}, + Status: types.String{Value: "started"}, + Topology: types.List{ElemType: types.ObjectType{AttrTypes: apmTopologyAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: apmTopologyAttrTypes(), + Attrs: map[string]attr.Value{ + "instance_configuration_id": types.String{Value: "aws.apm.r4"}, + "size": types.String{Value: "1g"}, + "size_resource": types.String{Value: "memory"}, + "zone_count": types.Int64{Value: 1}, }, - }, + }}, }, - }, + }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenApmResources(tt.args.in) + var newState modelV0 + diags := flattenApmResources(context.Background(), tt.args.in, &newState.Apm) + assert.Empty(t, diags) + + var got []apmResourceModelV0 + newState.Apm.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go index b97e59ed4..e34060acc 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go @@ -18,147 +18,171 @@ package deploymentdatasource import ( + "context" "encoding/json" "fmt" "strconv" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) // flattenElasticsearchResources takes in Elasticsearch resource models and returns its // flattened form. 
-func flattenElasticsearchResources(in []*models.ElasticsearchResourceInfo) ([]interface{}, error) { - var result = make([]interface{}, 0, len(in)) +func flattenElasticsearchResources(ctx context.Context, in []*models.ElasticsearchResourceInfo, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + var result = make([]elasticsearchResourceModelV0, 0, len(in)) + for _, res := range in { - var m = make(map[string]interface{}) + model := elasticsearchResourceModelV0{ + Topology: types.List{ElemType: types.ObjectType{AttrTypes: elasticsearchTopologyAttrTypes()}}, + } if res.RefID != nil { - m["ref_id"] = *res.RefID + model.RefID = types.String{Value: *res.RefID} } if res.Info != nil { if res.Info.Healthy != nil { - m["healthy"] = *res.Info.Healthy + model.Healthy = types.Bool{Value: *res.Info.Healthy} } if res.Info.ClusterID != nil { - m["resource_id"] = *res.Info.ClusterID + model.ResourceID = types.String{Value: *res.Info.ClusterID} } if res.Info.Status != nil { - m["status"] = *res.Info.Status + model.Status = types.String{Value: *res.Info.Status} } if !util.IsCurrentEsPlanEmpty(res) { var plan = res.Info.PlanInfo.Current.Plan if plan.Elasticsearch != nil { - m["version"] = plan.Elasticsearch.Version + model.Version = types.String{Value: plan.Elasticsearch.Version} } if plan.AutoscalingEnabled != nil { - m["autoscale"] = strconv.FormatBool(*plan.AutoscalingEnabled) + model.Autoscale = types.String{Value: strconv.FormatBool(*plan.AutoscalingEnabled)} } - top, err := flattenElasticsearchTopology(plan) - if err != nil { - return nil, err - } - m["topology"] = top + diags.Append(flattenElasticsearchTopology(ctx, plan, &model.Topology)...) 
} if res.Info.Metadata != nil { - m["cloud_id"] = res.Info.Metadata.CloudID + model.CloudID = types.String{Value: res.Info.Metadata.CloudID} - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v + endpoints := util.FlattenClusterEndpoint(res.Info.Metadata) + if endpoints != nil { + model.HttpEndpoint = types.String{Value: endpoints["http_endpoint"].(string)} + model.HttpsEndpoint = types.String{Value: endpoints["https_endpoint"].(string)} } } } - result = append(result, m) + + result = append(result, model) } - return result, nil + diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: elasticsearchResourceInfoAttrTypes(), + }, + }, target)...) + + return diags } -func flattenElasticsearchTopology(plan *models.ElasticsearchClusterPlan) ([]interface{}, error) { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) +func flattenElasticsearchTopology(ctx context.Context, plan *models.ElasticsearchClusterPlan, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + var result = make([]elasticsearchTopologyModelV0, 0, len(plan.ClusterTopology)) for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) + model := elasticsearchTopologyModelV0{ + NodeRoles: types.Set{ElemType: types.StringType}, + } - if isSizePopulated(topology) && *topology.Size.Value == 0 { + if isElasticsearchSizePopulated(topology) && *topology.Size.Value == 0 { continue } - m["instance_configuration_id"] = topology.InstanceConfigurationID + model.InstanceConfigurationID = types.String{Value: topology.InstanceConfigurationID} - if isSizePopulated(topology) { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource + if isElasticsearchSizePopulated(topology) { + model.Size = types.String{Value: util.MemoryToState(*topology.Size.Value)} + model.SizeResource = types.String{Value: *topology.Size.Resource} } - m["zone_count"] = 
topology.ZoneCount + model.ZoneCount = types.Int64{Value: int64(topology.ZoneCount)} if topology.NodeType != nil { if topology.NodeType.Data != nil { - m["node_type_data"] = *topology.NodeType.Data + model.NodeTypeData = types.Bool{Value: *topology.NodeType.Data} } if topology.NodeType.Ingest != nil { - m["node_type_ingest"] = *topology.NodeType.Ingest + model.NodeTypeIngest = types.Bool{Value: *topology.NodeType.Ingest} } if topology.NodeType.Master != nil { - m["node_type_master"] = *topology.NodeType.Master + model.NodeTypeMaster = types.Bool{Value: *topology.NodeType.Master} } if topology.NodeType.Ml != nil { - m["node_type_ml"] = *topology.NodeType.Ml + model.NodeTypeMl = types.Bool{Value: *topology.NodeType.Ml} } } if len(topology.NodeRoles) > 0 { - m["node_roles"] = schema.NewSet(schema.HashString, util.StringToItems( - topology.NodeRoles..., - )) + diags.Append(tfsdk.ValueFrom(ctx, util.StringToItems(topology.NodeRoles...), types.SetType{ElemType: types.StringType}, &model.NodeRoles)...) 
} - autoscaling := make(map[string]interface{}) - if ascale := topology.AutoscalingMax; ascale != nil { - autoscaling["max_size_resource"] = *ascale.Resource - autoscaling["max_size"] = util.MemoryToState(*ascale.Value) + var autoscaling elasticsearchAutoscalingModel + var empty = true + if limit := topology.AutoscalingMax; limit != nil { + autoscaling.MaxSizeResource = types.String{Value: *limit.Resource} + autoscaling.MaxSize = types.String{Value: util.MemoryToState(*limit.Value)} + empty = false } - if ascale := topology.AutoscalingMin; ascale != nil { - autoscaling["min_size_resource"] = *ascale.Resource - autoscaling["min_size"] = util.MemoryToState(*ascale.Value) + if limit := topology.AutoscalingMin; limit != nil { + autoscaling.MinSizeResource = types.String{Value: *limit.Resource} + autoscaling.MinSize = types.String{Value: util.MemoryToState(*limit.Value)} + empty = false } if topology.AutoscalingPolicyOverrideJSON != nil { b, err := json.Marshal(topology.AutoscalingPolicyOverrideJSON) if err != nil { - return nil, fmt.Errorf( - "elasticsearch topology %s: unable to persist policy_override_json: %w", - topology.ID, err, + diags.AddError( + "Invalid elasticsearch topology policy_override_json", + fmt.Sprintf("elasticsearch topology %s: unable to persist policy_override_json: %v", topology.ID, err), ) + } else { + autoscaling.PolicyOverrideJson = types.String{Value: string(b)} + empty = false } - autoscaling["policy_override_json"] = string(b) } - if len(autoscaling) > 0 { - m["autoscaling"] = []interface{}{autoscaling} + if !empty { + diags.Append(tfsdk.ValueFrom(ctx, []elasticsearchAutoscalingModel{autoscaling}, elasticsearchAutoscalingSchema(), &model.Autoscaling)...) } - result = append(result, m) + result = append(result, model) } - return result, nil + diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: elasticsearchTopologyAttrTypes(), + }, + }, target)...) 
+ + return diags } -func isSizePopulated(topology *models.ElasticsearchClusterTopologyElement) bool { +func isElasticsearchSizePopulated(topology *models.ElasticsearchClusterTopologyElement) bool { if topology.Size != nil && topology.Size.Value != nil { return true } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go index a251cf143..3b8fcee59 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go @@ -18,11 +18,14 @@ package deploymentdatasource import ( + "context" "testing" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" ) @@ -33,13 +36,13 @@ func Test_flattenElasticsearchResources(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []elasticsearchResourceModelV0 err string }{ { name: "empty resource list returns empty list", args: args{in: []*models.ElasticsearchResourceInfo{}}, - want: []interface{}{}, + want: []elasticsearchResourceModelV0{}, }, { name: "parses elasticsearch resource", @@ -89,6 +92,11 @@ func Test_flattenElasticsearchResources(t *testing.T) { Resource: ec.String("memory"), Value: ec.Int32(1024), }, + AutoscalingPolicyOverrideJSON: map[string]interface{}{ + "proactive_storage": map[string]interface{}{ + "forecast_window": "3 h", + }, + }, }, { NodeCountPerZone: 1, @@ -112,41 +120,55 @@ func Test_flattenElasticsearchResources(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "autoscale": "true", - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "cloud_id": "some CLOUD ID", - "http_endpoint": 
"http://somecluster.cloud.elastic.co:9200", - "https_endpoint": "https://somecluster.cloud.elastic.co:9243", - "healthy": true, - "status": "started", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.data.highio.i3", - "size": "2g", - "size_resource": "memory", - "node_type_data": true, - "node_type_ingest": true, - "node_type_master": true, - "node_type_ml": false, - "zone_count": int32(1), - "autoscaling": []interface{}{map[string]interface{}{ - "max_size": "15g", - "max_size_resource": "memory", - "min_size": "1g", - "min_size_resource": "memory", - }}, - }}, + want: []elasticsearchResourceModelV0{{ + Autoscale: types.String{Value: "true"}, + RefID: types.String{Value: "main-elasticsearch"}, + ResourceID: types.String{Value: mock.ValidClusterID}, + Version: types.String{Value: "7.7.0"}, + CloudID: types.String{Value: "some CLOUD ID"}, + HttpEndpoint: types.String{Value: "http://somecluster.cloud.elastic.co:9200"}, + HttpsEndpoint: types.String{Value: "https://somecluster.cloud.elastic.co:9243"}, + Healthy: types.Bool{Value: true}, + Status: types.String{Value: "started"}, + Topology: types.List{ElemType: types.ObjectType{AttrTypes: elasticsearchTopologyAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: elasticsearchTopologyAttrTypes(), + Attrs: map[string]attr.Value{ + "instance_configuration_id": types.String{Value: "aws.data.highio.i3"}, + "size": types.String{Value: "2g"}, + "size_resource": types.String{Value: "memory"}, + "node_type_data": types.Bool{Value: true}, + "node_type_ingest": types.Bool{Value: true}, + "node_type_master": types.Bool{Value: true}, + "node_type_ml": types.Bool{Value: false}, + "node_roles": types.Set{ElemType: types.StringType, Elems: []attr.Value{}}, + "zone_count": types.Int64{Value: 1}, + "autoscaling": types.List{ElemType: types.ObjectType{AttrTypes: elasticsearchAutoscalingAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: elasticsearchAutoscalingAttrTypes(), + 
Attrs: map[string]attr.Value{ + "max_size": types.String{Value: "15g"}, + "max_size_resource": types.String{Value: "memory"}, + "min_size": types.String{Value: "1g"}, + "min_size_resource": types.String{Value: "memory"}, + "policy_override_json": types.String{Value: "{\"proactive_storage\":{\"forecast_window\":\"3 h\"}}"}, + }}, + }, + }, + }}, + }, + }, }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := flattenElasticsearchResources(tt.args.in) - if err != nil && assert.EqualError(t, err, tt.err) { - t.Error(err) - } + var model modelV0 + diags := flattenElasticsearchResources(context.Background(), tt.args.in, &model.Elasticsearch) + assert.Empty(t, diags) + + var got []elasticsearchResourceModelV0 + model.Elasticsearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go index 1e0efb9c1..1fbbb6a59 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go @@ -18,97 +18,121 @@ package deploymentdatasource import ( + "context" "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) // flattenEnterpriseSearchResources takes in EnterpriseSearch resource models and returns its // flattened form. 
-func flattenEnterpriseSearchResources(in []*models.EnterpriseSearchResourceInfo) []interface{} { - var result = make([]interface{}, 0, len(in)) - for _, res := range in { - var m = make(map[string]interface{}) +func flattenEnterpriseSearchResources(ctx context.Context, in []*models.EnterpriseSearchResourceInfo, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + var result = make([]enterpriseSearchResourceModelV0, 0, len(in)) - if res.RefID != nil { - m["ref_id"] = *res.RefID + for _, res := range in { + model := enterpriseSearchResourceModelV0{ + Topology: types.List{ElemType: types.ObjectType{AttrTypes: enterpriseSearchTopologyAttrTypes()}}, } if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID + model.ElasticsearchClusterRefID = types.String{Value: *res.ElasticsearchClusterRefID} + } + + if res.RefID != nil { + model.RefID = types.String{Value: *res.RefID} } if res.Info != nil { if res.Info.Healthy != nil { - m["healthy"] = *res.Info.Healthy + model.Healthy = types.Bool{Value: *res.Info.Healthy} } if res.Info.ID != nil { - m["resource_id"] = *res.Info.ID + model.ResourceID = types.String{Value: *res.Info.ID} } if res.Info.Status != nil { - m["status"] = *res.Info.Status + model.Status = types.String{Value: *res.Info.Status} } if !util.IsCurrentEssPlanEmpty(res) { var plan = res.Info.PlanInfo.Current.Plan if plan.EnterpriseSearch != nil { - m["version"] = plan.EnterpriseSearch.Version + model.Version = types.String{Value: plan.EnterpriseSearch.Version} } - m["topology"] = flattenEnterpriseSearchTopology(plan) + diags.Append(flattenEnterpriseSearchTopology(ctx, plan, &model.Topology)...) 
} if res.Info.Metadata != nil { - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v + endpoints := util.FlattenClusterEndpoint(res.Info.Metadata) + if endpoints != nil { + model.HttpEndpoint = types.String{Value: endpoints["http_endpoint"].(string)} + model.HttpsEndpoint = types.String{Value: endpoints["https_endpoint"].(string)} } } } - result = append(result, m) + + result = append(result, model) } - return result + diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: enterpriseSearchResourceInfoAttrTypes(), + }, + }, target)...) + + return diags } -func flattenEnterpriseSearchTopology(plan *models.EnterpriseSearchPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) +func flattenEnterpriseSearchTopology(ctx context.Context, plan *models.EnterpriseSearchPlan, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + var result = make([]enterpriseSearchTopologyModelV0, 0, len(plan.ClusterTopology)) for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) + var model enterpriseSearchTopologyModelV0 if isEsSizePopulated(topology) && *topology.Size.Value == 0 { continue } - m["instance_configuration_id"] = topology.InstanceConfigurationID - - m["zone_count"] = topology.ZoneCount + model.InstanceConfigurationID = types.String{Value: topology.InstanceConfigurationID} if isEsSizePopulated(topology) { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource + model.Size = types.String{Value: util.MemoryToState(*topology.Size.Value)} + model.SizeResource = types.String{Value: *topology.Size.Resource} } + model.ZoneCount = types.Int64{Value: int64(topology.ZoneCount)} + if topology.NodeType != nil { if topology.NodeType.Appserver != nil { - m["node_type_appserver"] = *topology.NodeType.Appserver + model.NodeTypeAppserver = types.Bool{Value: *topology.NodeType.Appserver} } if 
topology.NodeType.Connector != nil { - m["node_type_connector"] = *topology.NodeType.Connector + model.NodeTypeConnector = types.Bool{Value: *topology.NodeType.Connector} } if topology.NodeType.Worker != nil { - m["node_type_worker"] = *topology.NodeType.Worker + model.NodeTypeWorker = types.Bool{Value: *topology.NodeType.Worker} } } - result = append(result, m) + result = append(result, model) } - return result + diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: enterpriseSearchTopologyAttrTypes(), + }, + }, target)...) + + return diags } func isEsSizePopulated(topology *models.EnterpriseSearchTopologyElement) bool { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go index 53dec0777..64baf24e8 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go @@ -18,11 +18,14 @@ package deploymentdatasource import ( + "context" "testing" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" ) @@ -33,12 +36,12 @@ func Test_flattenEnterpriseSearchResource(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []enterpriseSearchResourceModelV0 }{ { name: "empty resource list returns empty list", args: args{in: []*models.EnterpriseSearchResourceInfo{}}, - want: []interface{}{}, + want: []enterpriseSearchResourceModelV0{}, }, { name: "parses the enterprisesearch resource", @@ -96,31 +99,41 @@ func Test_flattenEnterpriseSearchResource(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "elasticsearch_cluster_ref_id": 
"main-elasticsearch", - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "http_endpoint": "http://enterprisesearchresource.cloud.elastic.co:9200", - "https_endpoint": "https://enterprisesearchresource.cloud.elastic.co:9243", - "healthy": true, - "status": "started", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - "node_type_appserver": true, - "node_type_worker": false, - }}, - }, + want: []enterpriseSearchResourceModelV0{{ + ElasticsearchClusterRefID: types.String{Value: "main-elasticsearch"}, + RefID: types.String{Value: "main-enterprise_search"}, + ResourceID: types.String{Value: mock.ValidClusterID}, + Version: types.String{Value: "7.7.0"}, + HttpEndpoint: types.String{Value: "http://enterprisesearchresource.cloud.elastic.co:9200"}, + HttpsEndpoint: types.String{Value: "https://enterprisesearchresource.cloud.elastic.co:9243"}, + Healthy: types.Bool{Value: true}, + Status: types.String{Value: "started"}, + Topology: types.List{ElemType: types.ObjectType{AttrTypes: enterpriseSearchTopologyAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: enterpriseSearchTopologyAttrTypes(), + Attrs: map[string]attr.Value{ + "instance_configuration_id": types.String{Value: "aws.enterprisesearch.r4"}, + "size": types.String{Value: "1g"}, + "size_resource": types.String{Value: "memory"}, + "zone_count": types.Int64{Value: 1}, + "node_type_appserver": types.Bool{Value: true}, + "node_type_connector": types.Bool{Value: false}, + "node_type_worker": types.Bool{Value: false}, + }, + }, + }, + }}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenEnterpriseSearchResources(tt.args.in) + var model modelV0 + diags := flattenEnterpriseSearchResources(context.Background(), tt.args.in, &model.EnterpriseSearch) + assert.Empty(t, diags) + + var got 
[]enterpriseSearchResourceModelV0 + model.EnterpriseSearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go index 70a3f6314..57b8ec910 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go @@ -18,84 +18,107 @@ package deploymentdatasource import ( + "context" "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) // flattenIntegrationsServerResources takes in IntegrationsServer resource models and returns its // flattened form. -func flattenIntegrationsServerResources(in []*models.IntegrationsServerResourceInfo) []interface{} { - var result = make([]interface{}, 0, len(in)) +func flattenIntegrationsServerResources(ctx context.Context, in []*models.IntegrationsServerResourceInfo, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + var result = make([]integrationsServerResourceModelV0, 0, len(in)) + for _, res := range in { - var m = make(map[string]interface{}) + model := integrationsServerResourceModelV0{ + Topology: types.List{ElemType: types.ObjectType{AttrTypes: integrationsServerTopologyAttrTypes()}}, + } if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID + model.ElasticsearchClusterRefID = types.String{Value: *res.ElasticsearchClusterRefID} } if res.RefID != nil { - m["ref_id"] = *res.RefID + model.RefID = types.String{Value: *res.RefID} } if res.Info != nil { if res.Info.Healthy != nil { - m["healthy"] = *res.Info.Healthy + model.Healthy = types.Bool{Value: 
*res.Info.Healthy} } if res.Info.ID != nil { - m["resource_id"] = *res.Info.ID + model.ResourceID = types.String{Value: *res.Info.ID} } if res.Info.Status != nil { - m["status"] = *res.Info.Status + model.Status = types.String{Value: *res.Info.Status} } if !util.IsCurrentIntegrationsServerPlanEmpty(res) { var plan = res.Info.PlanInfo.Current.Plan if plan.IntegrationsServer != nil { - m["version"] = plan.IntegrationsServer.Version + model.Version = types.String{Value: plan.IntegrationsServer.Version} } - m["topology"] = flattenIntegrationsServerTopology(plan) + diags.Append(flattenIntegrationsServerTopology(ctx, plan, &model.Topology)...) } if res.Info.Metadata != nil { - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v + endpoints := util.FlattenClusterEndpoint(res.Info.Metadata) + if endpoints != nil { + model.HttpEndpoint = types.String{Value: endpoints["http_endpoint"].(string)} + model.HttpsEndpoint = types.String{Value: endpoints["https_endpoint"].(string)} } } } - result = append(result, m) + result = append(result, model) } - return result + diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: integrationsServerResourceInfoAttrTypes(), + }, + }, target)...) 
+ + return diags } -func flattenIntegrationsServerTopology(plan *models.IntegrationsServerPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) +func flattenIntegrationsServerTopology(ctx context.Context, plan *models.IntegrationsServerPlan, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + var result = make([]integrationsServerTopologyModelV0, 0, len(plan.ClusterTopology)) for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) + var model integrationsServerTopologyModelV0 if isIntegrationsServerSizePopulated(topology) && *topology.Size.Value == 0 { continue } - m["instance_configuration_id"] = topology.InstanceConfigurationID + model.InstanceConfigurationID = types.String{Value: topology.InstanceConfigurationID} if isIntegrationsServerSizePopulated(topology) { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource + model.Size = types.String{Value: util.MemoryToState(*topology.Size.Value)} + model.SizeResource = types.String{Value: *topology.Size.Resource} } - m["zone_count"] = topology.ZoneCount + model.ZoneCount = types.Int64{Value: int64(topology.ZoneCount)} - result = append(result, m) + result = append(result, model) } - return result + diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: integrationsServerTopologyAttrTypes(), + }, + }, target)...) 
+ + return diags } func isIntegrationsServerSizePopulated(topology *models.IntegrationsServerTopologyElement) bool { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go index dd8576516..7fff8dbff 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go @@ -18,11 +18,14 @@ package deploymentdatasource import ( + "context" "testing" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" ) @@ -33,12 +36,12 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []integrationsServerResourceModelV0 }{ { name: "empty resource list returns empty list", args: args{in: []*models.IntegrationsServerResourceInfo{}}, - want: []interface{}{}, + want: []integrationsServerResourceModelV0{}, }, { name: "parses the integrations_server resource", @@ -57,59 +60,67 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { HTTPS: ec.Int32(9243), }, }, - PlanInfo: &models.IntegrationsServerPlansInfo{Current: &models.IntegrationsServerPlanInfo{ - Plan: &models.IntegrationsServerPlan{ - IntegrationsServer: &models.IntegrationsServerConfiguration{ - Version: "8.0.0", - }, - ClusterTopology: []*models.IntegrationsServerTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.integrations_server.r4", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, + PlanInfo: &models.IntegrationsServerPlansInfo{ + Current: &models.IntegrationsServerPlanInfo{ + Plan: &models.IntegrationsServerPlan{ + 
IntegrationsServer: &models.IntegrationsServerConfiguration{ + Version: "8.0.0", }, - { - ZoneCount: 1, - InstanceConfigurationID: "aws.integrations_server.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), + ClusterTopology: []*models.IntegrationsServerTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.integrations_server.r4", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + { + ZoneCount: 1, + InstanceConfigurationID: "aws.integrations_server.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, }, }, }, }, - }}, + }, }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-integrations_server", - "resource_id": mock.ValidClusterID, - "version": "8.0.0", - "http_endpoint": "http://integrations_serverresource.cloud.elastic.co:9200", - "https_endpoint": "https://integrations_serverresource.cloud.elastic.co:9243", - "healthy": true, - "status": "started", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.integrations_server.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), + want: []integrationsServerResourceModelV0{{ + ElasticsearchClusterRefID: types.String{Value: "main-elasticsearch"}, + RefID: types.String{Value: "main-integrations_server"}, + ResourceID: types.String{Value: mock.ValidClusterID}, + Version: types.String{Value: "8.0.0"}, + HttpEndpoint: types.String{Value: "http://integrations_serverresource.cloud.elastic.co:9200"}, + HttpsEndpoint: types.String{Value: "https://integrations_serverresource.cloud.elastic.co:9243"}, + Healthy: types.Bool{Value: true}, + Status: types.String{Value: "started"}, + Topology: types.List{ElemType: types.ObjectType{AttrTypes: integrationsServerTopologyAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: 
integrationsServerTopologyAttrTypes(), + Attrs: map[string]attr.Value{ + "instance_configuration_id": types.String{Value: "aws.integrations_server.r4"}, + "size": types.String{Value: "1g"}, + "size_resource": types.String{Value: "memory"}, + "zone_count": types.Int64{Value: 1}, }, - }, + }}, }, - }, + }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenIntegrationsServerResources(tt.args.in) + var newState modelV0 + diags := flattenIntegrationsServerResources(context.Background(), tt.args.in, &newState.IntegrationsServer) + assert.Empty(t, diags) + + var got []integrationsServerResourceModelV0 + newState.IntegrationsServer.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go b/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go index 50d1f800d..3c27f808f 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go @@ -18,83 +18,107 @@ package deploymentdatasource import ( + "context" "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) // flattenKibanaResources takes in Kibana resource models and returns its // flattened form. 
-func flattenKibanaResources(in []*models.KibanaResourceInfo) []interface{} { - var result = make([]interface{}, 0, len(in)) - for _, res := range in { - var m = make(map[string]interface{}) +func flattenKibanaResources(ctx context.Context, in []*models.KibanaResourceInfo, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + var result = make([]kibanaResourceModelV0, 0, len(in)) - if res.RefID != nil { - m["ref_id"] = *res.RefID + for _, res := range in { + model := kibanaResourceModelV0{ + Topology: types.List{ElemType: types.ObjectType{AttrTypes: kibanaTopologyAttrTypes()}}, } if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID + model.ElasticsearchClusterRefID = types.String{Value: *res.ElasticsearchClusterRefID} + } + + if res.RefID != nil { + model.RefID = types.String{Value: *res.RefID} } if res.Info != nil { if res.Info.Healthy != nil { - m["healthy"] = *res.Info.Healthy + model.Healthy = types.Bool{Value: *res.Info.Healthy} } if res.Info.ClusterID != nil { - m["resource_id"] = *res.Info.ClusterID + model.ResourceID = types.String{Value: *res.Info.ClusterID} } if res.Info.Status != nil { - m["status"] = *res.Info.Status + model.Status = types.String{Value: *res.Info.Status} } if !util.IsCurrentKibanaPlanEmpty(res) { var plan = res.Info.PlanInfo.Current.Plan if plan.Kibana != nil { - m["version"] = plan.Kibana.Version + model.Version = types.String{Value: plan.Kibana.Version} } - m["topology"] = flattenKibanaTopology(plan) + diags.Append(flattenKibanaTopology(ctx, plan, &model.Topology)...) 
} if res.Info.Metadata != nil { - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v + endpoints := util.FlattenClusterEndpoint(res.Info.Metadata) + if endpoints != nil { + model.HttpEndpoint = types.String{Value: endpoints["http_endpoint"].(string)} + model.HttpsEndpoint = types.String{Value: endpoints["https_endpoint"].(string)} } } } - result = append(result, m) + + result = append(result, model) } - return result + diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: kibanaResourceInfoAttrTypes(), + }, + }, target)...) + + return diags } -func flattenKibanaTopology(plan *models.KibanaClusterPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) +func flattenKibanaTopology(ctx context.Context, plan *models.KibanaClusterPlan, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + var result = make([]kibanaTopologyModelV0, 0, len(plan.ClusterTopology)) for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) + var model kibanaTopologyModelV0 if isKibanaSizePopulated(topology) && *topology.Size.Value == 0 { continue } - m["instance_configuration_id"] = topology.InstanceConfigurationID + model.InstanceConfigurationID = types.String{Value: topology.InstanceConfigurationID} if isKibanaSizePopulated(topology) { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource + model.Size = types.String{Value: util.MemoryToState(*topology.Size.Value)} + model.SizeResource = types.String{Value: *topology.Size.Resource} } - m["zone_count"] = topology.ZoneCount + model.ZoneCount = types.Int64{Value: int64(topology.ZoneCount)} - result = append(result, m) + result = append(result, model) } - return result + diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: kibanaTopologyAttrTypes(), + }, + }, target)...) 
+ + return diags } func isKibanaSizePopulated(topology *models.KibanaClusterTopologyElement) bool { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go index f3d3c41f3..2faea9f6a 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go @@ -18,11 +18,14 @@ package deploymentdatasource import ( + "context" "testing" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" ) @@ -33,12 +36,12 @@ func Test_flattenKibanaResources(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []kibanaResourceModelV0 }{ { name: "empty resource list returns empty list", args: args{in: []*models.KibanaResourceInfo{}}, - want: []interface{}{}, + want: []kibanaResourceModelV0{}, }, { name: "parses the kibana resource", @@ -87,31 +90,36 @@ func Test_flattenKibanaResources(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "http_endpoint": "http://kibanaresource.cloud.elastic.co:9200", - "https_endpoint": "https://kibanaresource.cloud.elastic.co:9243", - "healthy": true, - "status": "started", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), + want: []kibanaResourceModelV0{{ + ElasticsearchClusterRefID: types.String{Value: "main-elasticsearch"}, + RefID: types.String{Value: "main-kibana"}, + ResourceID: types.String{Value: mock.ValidClusterID}, + Version: 
types.String{Value: "7.7.0"}, + HttpEndpoint: types.String{Value: "http://kibanaresource.cloud.elastic.co:9200"}, + HttpsEndpoint: types.String{Value: "https://kibanaresource.cloud.elastic.co:9243"}, + Healthy: types.Bool{Value: true}, + Status: types.String{Value: "started"}, + Topology: types.List{ElemType: types.ObjectType{AttrTypes: kibanaTopologyAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: kibanaTopologyAttrTypes(), + Attrs: map[string]attr.Value{ + "instance_configuration_id": types.String{Value: "aws.kibana.r4"}, + "size": types.String{Value: "1g"}, + "size_resource": types.String{Value: "memory"}, + "zone_count": types.Int64{Value: 1}, }, - }, - }, + }}}, + }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenKibanaResources(tt.args.in) + var model modelV0 + diags := flattenKibanaResources(context.Background(), tt.args.in, &model.Kibana) + assert.Empty(t, diags) + var got []kibanaResourceModelV0 + model.Kibana.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_observability.go b/ec/ecdatasource/deploymentdatasource/flatteners_observability.go index 53da35e1a..507ba7607 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_observability.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_observability.go @@ -17,33 +17,52 @@ package deploymentdatasource -import "github.com/elastic/cloud-sdk-go/pkg/models" +import ( + "context" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) // flattenObservability parses a deployment's observability settings. 
-func flattenObservability(settings *models.DeploymentSettings) []interface{} { - if settings == nil || settings.Observability == nil { - return nil +func flattenObservability(ctx context.Context, settings *models.DeploymentSettings, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + model := observabilitySettingsModel{ + Metrics: types.Bool{Value: false}, + Logs: types.Bool{Value: false}, } + empty := true - var m = make(map[string]interface{}) + if settings == nil || settings.Observability == nil { + return diags + } // We are only accepting a single deployment ID and refID for both logs and metrics. // If either of them is not nil the deployment ID and refID will be filled. if settings.Observability.Metrics != nil { - m["deployment_id"] = settings.Observability.Metrics.Destination.DeploymentID - m["ref_id"] = settings.Observability.Metrics.Destination.RefID - m["metrics"] = true + model.DeploymentID = types.String{Value: *settings.Observability.Metrics.Destination.DeploymentID} + model.RefID = types.String{Value: settings.Observability.Metrics.Destination.RefID} + model.Metrics = types.Bool{Value: true} + empty = false } if settings.Observability.Logging != nil { - m["deployment_id"] = settings.Observability.Logging.Destination.DeploymentID - m["ref_id"] = settings.Observability.Logging.Destination.RefID - m["logs"] = true + model.DeploymentID = types.String{Value: *settings.Observability.Logging.Destination.DeploymentID} + model.RefID = types.String{Value: settings.Observability.Logging.Destination.RefID} + model.Logs = types.Bool{Value: true} + empty = false } - if len(m) == 0 { - return nil + if empty { + return diags } - return []interface{}{m} + diags.Append(tfsdk.ValueFrom(ctx, []observabilitySettingsModel{model}, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: observabilitySettingsAttrTypes(), + }, + }, target)...) 
+ + return diags } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go index eb2e8d52d..4da04c60c 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go @@ -18,10 +18,12 @@ package deploymentdatasource import ( + "context" "testing" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" ) @@ -32,7 +34,7 @@ func TestFlattenObservability(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []observabilitySettingsModel }{ { name: "flattens no observability settings when empty", @@ -58,10 +60,11 @@ func TestFlattenObservability(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "deployment_id": &mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "logs": true, + want: []observabilitySettingsModel{{ + DeploymentID: types.String{Value: mock.ValidClusterID}, + RefID: types.String{Value: "main-elasticsearch"}, + Logs: types.Bool{Value: true}, + Metrics: types.Bool{Value: false}, }}, }, { @@ -76,10 +79,11 @@ func TestFlattenObservability(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "deployment_id": &mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "metrics": true, + want: []observabilitySettingsModel{{ + DeploymentID: types.String{Value: mock.ValidClusterID}, + RefID: types.String{Value: "main-elasticsearch"}, + Logs: types.Bool{Value: false}, + Metrics: types.Bool{Value: true}, }}, }, { @@ -100,17 +104,21 @@ func TestFlattenObservability(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "deployment_id": &mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "logs": true, - "metrics": true, + want: 
[]observabilitySettingsModel{{ + DeploymentID: types.String{Value: mock.ValidClusterID}, + RefID: types.String{Value: "main-elasticsearch"}, + Logs: types.Bool{Value: true}, + Metrics: types.Bool{Value: true}, }}, }, } for _, tt := range tests { + var newState modelV0 t.Run(tt.name, func(t *testing.T) { - got := flattenObservability(tt.args.settings) + diags := flattenObservability(context.Background(), tt.args.settings, &newState.Observability) + assert.Empty(t, diags) + var got []observabilitySettingsModel + newState.Observability.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_tags.go b/ec/ecdatasource/deploymentdatasource/flatteners_tags.go index 32964d512..1605b0dec 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_tags.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_tags.go @@ -19,20 +19,24 @@ package deploymentdatasource import ( "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" ) // flattenTags takes in Deployment Metadata resource models and returns its // Tags in flattened form. 
-func flattenTags(metadata *models.DeploymentMetadata) map[string]interface{} { +func flattenTags(metadata *models.DeploymentMetadata) types.Map { + if metadata == nil || metadata.Tags == nil { - return nil + return types.Map{ElemType: types.StringType, Elems: map[string]attr.Value{}} } - var result = make(map[string]interface{}) + var tags = make(map[string]attr.Value) for _, res := range metadata.Tags { if res.Key != nil { - result[*res.Key] = *res.Value + tags[*res.Key] = types.String{Value: *res.Value} } } - return result + return types.Map{ElemType: types.StringType, Elems: tags} + } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_tags_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_tags_test.go index 5e28d218c..6bc722462 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_tags_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_tags_test.go @@ -18,6 +18,7 @@ package deploymentdatasource import ( + "context" "testing" "github.com/elastic/cloud-sdk-go/pkg/models" @@ -32,15 +33,17 @@ func TestFlattenTags(t *testing.T) { tests := []struct { name string args args - want map[string]interface{} + want map[string]string }{ { name: "flattens no metadata tags when empty", args: args{}, + want: map[string]string{}, }, { name: "flattens no metadata tags when empty", args: args{metadata: &models.DeploymentMetadata{}}, + want: map[string]string{}, }, { name: "flatten metadata tags", @@ -52,7 +55,7 @@ func TestFlattenTags(t *testing.T) { }, }, }}, - want: map[string]interface{}{"foo": "bar"}, + want: map[string]string{"foo": "bar"}, }, { name: "flatten metadata tags", @@ -68,12 +71,14 @@ func TestFlattenTags(t *testing.T) { }, }, }}, - want: map[string]interface{}{"foo": "bar", "bar": "baz"}, + want: map[string]string{"foo": "bar", "bar": "baz"}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenTags(tt.args.metadata) + result := flattenTags(tt.args.metadata) + got := make(map[string]string, 
len(result.Elems)) + result.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go index 18fda3a88..51851accc 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go @@ -18,19 +18,20 @@ package deploymentdatasource import ( + "context" "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) // flattenTrafficFiltering parses a deployment's traffic filtering settings. -func flattenTrafficFiltering(settings *models.DeploymentSettings) []interface{} { +func flattenTrafficFiltering(ctx context.Context, settings *models.DeploymentSettings, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics if settings == nil || settings.TrafficFilterSettings == nil { - return nil + return diags } - var rules []interface{} - for _, rule := range settings.TrafficFilterSettings.Rulesets { - rules = append(rules, rule) - } - - return rules + diags.Append(tfsdk.ValueFrom(ctx, settings.TrafficFilterSettings.Rulesets, types.ListType{ElemType: types.StringType}, target)...) 
+ return diags } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go index 557e0639a..aa7a81d33 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go @@ -18,6 +18,7 @@ package deploymentdatasource import ( + "context" "testing" "github.com/elastic/cloud-sdk-go/pkg/models" @@ -31,7 +32,7 @@ func Test_flattenTrafficFiltering(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []string }{ { name: "parses no rules when they're empty", @@ -54,9 +55,10 @@ func Test_flattenTrafficFiltering(t *testing.T) { Rulesets: []string{}, }, }}, + want: []string{}, }, { - name: "parses no rules when they're empty", + name: "parses rules", args: args{settings: &models.DeploymentSettings{ TrafficFilterSettings: &models.TrafficFilterSettings{ Rulesets: []string{ @@ -65,7 +67,7 @@ func Test_flattenTrafficFiltering(t *testing.T) { }, }, }}, - want: []interface{}{ + want: []string{ "one-id-of-a-rule", "another-id-of-another-rule", }, @@ -73,7 +75,11 @@ func Test_flattenTrafficFiltering(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenTrafficFiltering(tt.args.settings) + var newState modelV0 + diags := flattenTrafficFiltering(context.Background(), tt.args.settings, &newState.TrafficFilter) + assert.Empty(t, diags) + var got []string + newState.TrafficFilter.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecdatasource/deploymentdatasource/schema.go b/ec/ecdatasource/deploymentdatasource/schema.go index b8a42b87a..53d2fc73d 100644 --- a/ec/ecdatasource/deploymentdatasource/schema.go +++ b/ec/ecdatasource/deploymentdatasource/schema.go @@ -18,103 +18,72 @@ package deploymentdatasource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + 
"context" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -func newSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "alias": { - Type: schema.TypeString, - Computed: true, - }, - "healthy": { - Type: schema.TypeBool, - Computed: true, - }, - "id": { - Type: schema.TypeString, - Required: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "deployment_template_id": { - Type: schema.TypeString, - Computed: true, - }, - "traffic_filter": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, +func (s DataSourceType) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { + return tfsdk.Schema{ + Attributes: map[string]tfsdk.Attribute{ + "alias": { + Type: types.StringType, + Computed: true, }, - }, - "observability": { - Type: schema.TypeList, - Computed: true, - Elem: newObservabilitySettings(), - }, - "tags": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + "healthy": { + Type: types.BoolType, + Computed: true, }, - }, - - // Deployment resources - "elasticsearch": { - Type: schema.TypeList, - Computed: true, - Elem: newElasticsearchResourceInfo(), - }, - "kibana": { - Type: schema.TypeList, - Computed: true, - Elem: newKibanaResourceInfo(), - }, - "apm": { - Type: schema.TypeList, - Computed: true, - Elem: newApmResourceInfo(), - }, - "integrations_server": { - Type: schema.TypeList, - Computed: true, - Elem: newIntegrationsServerResourceInfo(), - }, - "enterprise_search": { - Type: schema.TypeList, - Computed: true, - Elem: newEnterpriseSearchResourceInfo(), - }, - } -} - -func newObservabilitySettings() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "deployment_id": { - Type: 
schema.TypeString, + "id": { + Type: types.StringType, + Required: true, + }, + "name": { + Type: types.StringType, + Computed: true, + }, + "region": { + Type: types.StringType, Computed: true, }, - "ref_id": { - Type: schema.TypeString, + "deployment_template_id": { + Type: types.StringType, Computed: true, }, - "logs": { - Type: schema.TypeBool, + "traffic_filter": { + Type: types.ListType{ElemType: types.StringType}, Computed: true, }, - "metrics": { - Type: schema.TypeBool, + "observability": observabilitySettingsSchema(), + "tags": { + Type: types.MapType{ElemType: types.StringType}, Computed: true, }, + + // Deployment resources + "elasticsearch": elasticsearchResourceInfoSchema(), + "kibana": kibanaResourceInfoSchema(), + "apm": apmResourceInfoSchema(), + "integrations_server": integrationsServerResourceInfoSchema(), + "enterprise_search": enterpriseSearchResourceInfoSchema(), }, - } + }, nil +} + +type modelV0 struct { + Alias types.String `tfsdk:"alias"` + Healthy types.Bool `tfsdk:"healthy"` + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Region types.String `tfsdk:"region"` + DeploymentTemplateID types.String `tfsdk:"deployment_template_id"` + TrafficFilter types.List `tfsdk:"traffic_filter"` //< string + Observability types.List `tfsdk:"observability"` //< observabilitySettingsModel + Tags types.Map `tfsdk:"tags"` //< string + Elasticsearch types.List `tfsdk:"elasticsearch"` //< elasticsearchResourceModelV0 + Kibana types.List `tfsdk:"kibana"` //< kibanaResourceModelV0 + Apm types.List `tfsdk:"apm"` //< apmResourceModelV0 + IntegrationsServer types.List `tfsdk:"integrations_server"` //< integrationsServerResourceModelV0 + EnterpriseSearch types.List `tfsdk:"enterprise_search"` //< enterpriseSearchResourceModelV0 } diff --git a/ec/ecdatasource/deploymentdatasource/schema_apm.go b/ec/ecdatasource/deploymentdatasource/schema_apm.go index 3cda06270..2030c6500 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_apm.go +++ 
b/ec/ecdatasource/deploymentdatasource/schema_apm.go @@ -18,72 +18,64 @@ package deploymentdatasource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -func newApmResourceInfo() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Computed: true, - }, - "healthy": { - Type: schema.TypeBool, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "ref_id": { - Type: schema.TypeString, - Computed: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "version": { - Type: schema.TypeString, - Computed: true, - }, - "topology": apmTopologySchema(), - }, +func apmResourceInfoSchema() tfsdk.Attribute { + // TODO should we use tfsdk.ListNestedAttributes here? 
- see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 + return tfsdk.Attribute{ + Computed: true, + Type: types.ListType{ElemType: types.ObjectType{ + AttrTypes: apmResourceInfoAttrTypes(), + }}, } } -func apmTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - }, - "size_resource": { - Type: schema.TypeString, - Computed: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, +func apmResourceInfoAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "elasticsearch_cluster_ref_id": types.StringType, + "healthy": types.BoolType, + "http_endpoint": types.StringType, + "https_endpoint": types.StringType, + "ref_id": types.StringType, + "resource_id": types.StringType, + "status": types.StringType, + "version": types.StringType, + "topology": apmTopologySchema(), + } +} +func apmTopologySchema() attr.Type { + return types.ListType{ElemType: types.ObjectType{ + AttrTypes: apmTopologyAttrTypes(), + }} +} + +func apmTopologyAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "instance_configuration_id": types.StringType, + "size": types.StringType, + "size_resource": types.StringType, + "zone_count": types.Int64Type, } } + +type apmResourceModelV0 struct { + ElasticsearchClusterRefID types.String `tfsdk:"elasticsearch_cluster_ref_id"` + Healthy types.Bool `tfsdk:"healthy"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + RefID types.String `tfsdk:"ref_id"` + ResourceID types.String `tfsdk:"resource_id"` + Status types.String `tfsdk:"status"` + Version types.String `tfsdk:"version"` + Topology types.List 
`tfsdk:"topology"` //< apmTopologyModelV0 +} + +type apmTopologyModelV0 struct { + InstanceConfigurationID types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` +} diff --git a/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go b/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go index d742c18ce..17d24e00a 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go +++ b/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go @@ -18,139 +18,103 @@ package deploymentdatasource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -func newElasticsearchResourceInfo() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "autoscale": { - Type: schema.TypeString, - Computed: true, - }, - "healthy": { - Type: schema.TypeBool, - Computed: true, - }, - "cloud_id": { - Type: schema.TypeString, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "ref_id": { - Type: schema.TypeString, - Computed: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "version": { - Type: schema.TypeString, - Computed: true, - }, - "topology": elasticsearchTopologySchema(), - }, +func elasticsearchResourceInfoSchema() tfsdk.Attribute { + // TODO should we use tfsdk.ListNestedAttributes here? 
- see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 + return tfsdk.Attribute{ + Computed: true, + Type: types.ListType{ElemType: types.ObjectType{ + AttrTypes: elasticsearchResourceInfoAttrTypes(), + }}, } } -func elasticsearchTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - }, - "size_resource": { - Type: schema.TypeString, - Computed: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - }, - "node_type_data": { - Type: schema.TypeBool, - Computed: true, - }, - "node_type_master": { - Type: schema.TypeBool, - Computed: true, - }, - "node_type_ingest": { - Type: schema.TypeBool, - Computed: true, - }, - "node_type_ml": { - Type: schema.TypeBool, - Optional: true, - }, - "node_roles": { - Type: schema.TypeSet, - Set: schema.HashString, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "autoscaling": { - Type: schema.TypeList, - Description: "Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources.", - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_size_resource": { - Description: "Maximum resource type for the maximum autoscaling setting.", - Type: schema.TypeString, - Computed: true, - }, +func elasticsearchResourceInfoAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "autoscale": types.StringType, + "healthy": types.BoolType, + "cloud_id": types.StringType, + "http_endpoint": types.StringType, + "https_endpoint": types.StringType, + "ref_id": types.StringType, + "resource_id": types.StringType, + "status": types.StringType, + "version": types.StringType, + "topology": 
elasticsearchTopologySchema(), + } +} - "max_size": { - Description: "Maximum size value for the maximum autoscaling setting.", - Type: schema.TypeString, - Computed: true, - }, +func elasticsearchTopologySchema() attr.Type { + return types.ListType{ElemType: types.ObjectType{ + AttrTypes: elasticsearchTopologyAttrTypes(), + }} +} - "min_size_resource": { - Description: "Minimum resource type for the minimum autoscaling setting.", - Type: schema.TypeString, - Computed: true, - }, +func elasticsearchTopologyAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "instance_configuration_id": types.StringType, + "size": types.StringType, + "size_resource": types.StringType, + "zone_count": types.Int64Type, + "node_type_data": types.BoolType, + "node_type_master": types.BoolType, + "node_type_ingest": types.BoolType, + "node_type_ml": types.BoolType, + "node_roles": types.SetType{ElemType: types.StringType}, + "autoscaling": elasticsearchAutoscalingSchema(), // Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources. + } +} - "min_size": { - Description: "Minimum size value for the minimum autoscaling setting.", - Type: schema.TypeString, - Computed: true, - }, +func elasticsearchAutoscalingSchema() attr.Type { + return types.ListType{ElemType: types.ObjectType{ + AttrTypes: elasticsearchAutoscalingAttrTypes(), + }} +} - "policy_override_json": { - Type: schema.TypeString, - Description: "Computed policy overrides set directly via the API or other clients.", - Computed: true, - }, - }, - }, - }, - }, - }, +func elasticsearchAutoscalingAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "max_size_resource": types.StringType, // Maximum resource type for the maximum autoscaling setting. + "max_size": types.StringType, // Maximum size value for the maximum autoscaling setting. + "min_size_resource": types.StringType, // Minimum resource type for the minimum autoscaling setting. 
+ "min_size": types.StringType, // Minimum size value for the minimum autoscaling setting. + "policy_override_json": types.StringType, // Computed policy overrides set directly via the API or other clients. } } + +type elasticsearchResourceModelV0 struct { + Autoscale types.String `tfsdk:"autoscale"` + Healthy types.Bool `tfsdk:"healthy"` + CloudID types.String `tfsdk:"cloud_id"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + RefID types.String `tfsdk:"ref_id"` + ResourceID types.String `tfsdk:"resource_id"` + Status types.String `tfsdk:"status"` + Version types.String `tfsdk:"version"` + Topology types.List `tfsdk:"topology"` //< elasticsearchTopologyModelV0 +} + +type elasticsearchTopologyModelV0 struct { + InstanceConfigurationID types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + NodeTypeData types.Bool `tfsdk:"node_type_data"` + NodeTypeMaster types.Bool `tfsdk:"node_type_master"` + NodeTypeIngest types.Bool `tfsdk:"node_type_ingest"` + NodeTypeMl types.Bool `tfsdk:"node_type_ml"` + NodeRoles types.Set `tfsdk:"node_roles"` + Autoscaling types.List `tfsdk:"autoscaling"` //< elasticsearchAutoscalingModel +} + +type elasticsearchAutoscalingModel struct { + MaxSizeResource types.String `tfsdk:"max_size_resource"` + MaxSize types.String `tfsdk:"max_size"` + MinSizeResource types.String `tfsdk:"min_size_resource"` + MinSize types.String `tfsdk:"min_size"` + PolicyOverrideJson types.String `tfsdk:"policy_override_json"` +} diff --git a/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go b/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go index ec7cfeb5d..b47fe0684 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go +++ b/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go @@ -18,86 +18,70 @@ package 
deploymentdatasource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -func newEnterpriseSearchResourceInfo() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Computed: true, - }, - "healthy": { - Type: schema.TypeBool, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "ref_id": { - Type: schema.TypeString, - Computed: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "version": { - Type: schema.TypeString, - Computed: true, - }, - "topology": enterpriseSearchTopologySchema(), - }, +func enterpriseSearchResourceInfoSchema() tfsdk.Attribute { + // TODO should we use tfsdk.ListNestedAttributes here? 
- see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 + return tfsdk.Attribute{ + Computed: true, + Type: types.ListType{ElemType: types.ObjectType{ + AttrTypes: enterpriseSearchResourceInfoAttrTypes(), + }}, } } -func enterpriseSearchTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - }, - "size_resource": { - Type: schema.TypeString, - Computed: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - }, - "node_type_appserver": { - Type: schema.TypeBool, - Computed: true, - }, - - "node_type_connector": { - Type: schema.TypeBool, - Computed: true, - }, +func enterpriseSearchResourceInfoAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "elasticsearch_cluster_ref_id": types.StringType, + "healthy": types.BoolType, + "http_endpoint": types.StringType, + "https_endpoint": types.StringType, + "ref_id": types.StringType, + "resource_id": types.StringType, + "status": types.StringType, + "version": types.StringType, + "topology": enterpriseSearchTopologySchema(), + } +} +func enterpriseSearchTopologySchema() attr.Type { + return types.ListType{ElemType: types.ObjectType{ + AttrTypes: enterpriseSearchTopologyAttrTypes(), + }} +} - "node_type_worker": { - Type: schema.TypeBool, - Computed: true, - }, - }, - }, +func enterpriseSearchTopologyAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "instance_configuration_id": types.StringType, + "size": types.StringType, + "size_resource": types.StringType, + "zone_count": types.Int64Type, + "node_type_appserver": types.BoolType, + "node_type_connector": types.BoolType, + "node_type_worker": types.BoolType, } } + +type 
enterpriseSearchResourceModelV0 struct { + ElasticsearchClusterRefID types.String `tfsdk:"elasticsearch_cluster_ref_id"` + Healthy types.Bool `tfsdk:"healthy"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + RefID types.String `tfsdk:"ref_id"` + ResourceID types.String `tfsdk:"resource_id"` + Status types.String `tfsdk:"status"` + Version types.String `tfsdk:"version"` + Topology types.List `tfsdk:"topology"` //< enterpriseSearchTopologyModelV0 +} + +type enterpriseSearchTopologyModelV0 struct { + InstanceConfigurationID types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + NodeTypeAppserver types.Bool `tfsdk:"node_type_appserver"` + NodeTypeConnector types.Bool `tfsdk:"node_type_connector"` + NodeTypeWorker types.Bool `tfsdk:"node_type_worker"` +} diff --git a/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go b/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go index a9f8bfc6e..f650cc60a 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go +++ b/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go @@ -18,72 +18,64 @@ package deploymentdatasource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -func newIntegrationsServerResourceInfo() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Computed: true, - }, - "healthy": { - Type: schema.TypeBool, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - 
"ref_id": { - Type: schema.TypeString, - Computed: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "status": { - Type: schema.TypeString, - Computed: true, - }, - "version": { - Type: schema.TypeString, - Computed: true, - }, - "topology": integrationsServerTopologySchema(), - }, +func integrationsServerResourceInfoSchema() tfsdk.Attribute { + // TODO should we use tfsdk.ListNestedAttributes here? - see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 + return tfsdk.Attribute{ + Computed: true, + Type: types.ListType{ElemType: types.ObjectType{ + AttrTypes: integrationsServerResourceInfoAttrTypes(), + }}, } } -func integrationsServerTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - }, - "size_resource": { - Type: schema.TypeString, - Computed: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, +func integrationsServerResourceInfoAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "elasticsearch_cluster_ref_id": types.StringType, + "healthy": types.BoolType, + "http_endpoint": types.StringType, + "https_endpoint": types.StringType, + "ref_id": types.StringType, + "resource_id": types.StringType, + "status": types.StringType, + "version": types.StringType, + "topology": integrationsServerTopologySchema(), + } +} +func integrationsServerTopologySchema() attr.Type { + return types.ListType{ElemType: types.ObjectType{ + AttrTypes: integrationsServerTopologyAttrTypes(), + }} +} + +func integrationsServerTopologyAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "instance_configuration_id": types.StringType, + "size": 
types.StringType, + "size_resource": types.StringType, + "zone_count": types.Int64Type, } } + +type integrationsServerResourceModelV0 struct { + ElasticsearchClusterRefID types.String `tfsdk:"elasticsearch_cluster_ref_id"` + Healthy types.Bool `tfsdk:"healthy"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + RefID types.String `tfsdk:"ref_id"` + ResourceID types.String `tfsdk:"resource_id"` + Status types.String `tfsdk:"status"` + Version types.String `tfsdk:"version"` + Topology types.List `tfsdk:"topology"` //< integrationsServerTopologyModelV0 +} + +type integrationsServerTopologyModelV0 struct { + InstanceConfigurationID types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` +} diff --git a/ec/ecdatasource/deploymentdatasource/schema_kibana.go b/ec/ecdatasource/deploymentdatasource/schema_kibana.go index b0ef4bce1..3b425ddf3 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_kibana.go +++ b/ec/ecdatasource/deploymentdatasource/schema_kibana.go @@ -18,72 +18,64 @@ package deploymentdatasource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -func newKibanaResourceInfo() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Computed: true, - }, - "healthy": { - Type: schema.TypeBool, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "ref_id": { - Type: schema.TypeString, - Computed: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "status": 
{ - Type: schema.TypeString, - Computed: true, - }, - "version": { - Type: schema.TypeString, - Computed: true, - }, - "topology": kibanaTopologySchema(), - }, +func kibanaResourceInfoSchema() tfsdk.Attribute { + // TODO should we use tfsdk.ListNestedAttributes here? - see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 + return tfsdk.Attribute{ + Computed: true, + Type: types.ListType{ElemType: types.ObjectType{ + AttrTypes: kibanaResourceInfoAttrTypes(), + }}, } } -func kibanaTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - }, - "size_resource": { - Type: schema.TypeString, - Computed: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - }, - }, - }, +func kibanaResourceInfoAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "elasticsearch_cluster_ref_id": types.StringType, + "healthy": types.BoolType, + "http_endpoint": types.StringType, + "https_endpoint": types.StringType, + "ref_id": types.StringType, + "resource_id": types.StringType, + "status": types.StringType, + "version": types.StringType, + "topology": kibanaTopologySchema(), + } +} +func kibanaTopologySchema() attr.Type { + return types.ListType{ElemType: types.ObjectType{ + AttrTypes: kibanaTopologyAttrTypes(), + }} +} + +func kibanaTopologyAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "instance_configuration_id": types.StringType, + "size": types.StringType, + "size_resource": types.StringType, + "zone_count": types.Int64Type, } } + +type kibanaResourceModelV0 struct { + ElasticsearchClusterRefID types.String `tfsdk:"elasticsearch_cluster_ref_id"` + Healthy types.Bool `tfsdk:"healthy"` + 
HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + RefID types.String `tfsdk:"ref_id"` + ResourceID types.String `tfsdk:"resource_id"` + Status types.String `tfsdk:"status"` + Version types.String `tfsdk:"version"` + Topology types.List `tfsdk:"topology"` //< kibanaTopologyModelV0 +} + +type kibanaTopologyModelV0 struct { + InstanceConfigurationID types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` +} diff --git a/ec/ecdatasource/deploymentdatasource/schema_observability.go b/ec/ecdatasource/deploymentdatasource/schema_observability.go new file mode 100644 index 000000000..485447989 --- /dev/null +++ b/ec/ecdatasource/deploymentdatasource/schema_observability.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package deploymentdatasource + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func observabilitySettingsSchema() tfsdk.Attribute { + // TODO should we use tfsdk.ListNestedAttributes here? - see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 + return tfsdk.Attribute{ + Computed: true, + Type: types.ListType{ElemType: types.ObjectType{ + AttrTypes: observabilitySettingsAttrTypes(), + }}, + } +} + +func observabilitySettingsAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "deployment_id": types.StringType, + "ref_id": types.StringType, + "logs": types.BoolType, + "metrics": types.BoolType, + } +} + +type observabilitySettingsModel struct { + DeploymentID types.String `tfsdk:"deployment_id"` + RefID types.String `tfsdk:"ref_id"` + Logs types.Bool `tfsdk:"logs"` + Metrics types.Bool `tfsdk:"metrics"` +} diff --git a/ec/ecdatasource/deploymentsdatasource/datasource.go b/ec/ecdatasource/deploymentsdatasource/datasource.go index 23eabdd1b..0fbd64040 100644 --- a/ec/ecdatasource/deploymentsdatasource/datasource.go +++ b/ec/ecdatasource/deploymentsdatasource/datasource.go @@ -19,108 +19,129 @@ package deploymentsdatasource import ( "context" + "fmt" + "github.com/elastic/terraform-provider-ec/ec/internal" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "strconv" - "time" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -// DataSource returns the ec_deployments data source schema. -func DataSource() *schema.Resource { - return &schema.Resource{ - ReadContext: read, +var _ provider.DataSourceType = (*DataSourceType)(nil) - Schema: newSchema(), +type DataSourceType struct{} - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(5 * time.Minute), - }, - } +func (s DataSourceType) NewDataSource(ctx context.Context, p provider.Provider) (datasource.DataSource, diag.Diagnostics) { + return &deploymentsDataSource{ + p: p.(internal.Provider), + }, nil } -func read(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) +var _ datasource.DataSource = (*deploymentsDataSource)(nil) - query, err := expandFilters(d) - if err != nil { - return diag.FromErr(err) +type deploymentsDataSource struct { + p internal.Provider +} + +func (d deploymentsDataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { + var newState modelV0 + response.Diagnostics.Append(request.Config.Get(ctx, &newState)...) + if response.Diagnostics.HasError() { + return + } + + query, diags := expandFilters(ctx, newState) + response.Diagnostics.Append(diags...) + if diags.HasError() { + return } res, err := deploymentapi.Search(deploymentapi.SearchParams{ - API: client, + API: d.p.GetClient(), Request: query, }) if err != nil { - return diag.FromErr(multierror.NewPrefixed("failed searching deployments", err)) + response.Diagnostics.AddError( + "Failed searching deployments", + fmt.Sprintf("Failed searching deployments version: %s", err), + ) + return } - if err := modelToState(d, res); err != nil { - return diag.FromErr(err) + response.Diagnostics.Append(modelToState(ctx, res, &newState)...) 
+ if response.Diagnostics.HasError() { + return } - return nil + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) } -func modelToState(d *schema.ResourceData, res *models.DeploymentsSearchResponse) error { - if d.Id() == "" { - if b, _ := res.MarshalBinary(); len(b) > 0 { - d.SetId(strconv.Itoa(schema.HashString(string(b)))) - } - } +/* TODO - see https://github.com/multani/terraform-provider-camunda/pull/16/files +Timeouts: &schema.ResourceTimeout{ + Default: schema.DefaultTimeout(5 * time.Minute), +}, +*/ + +func modelToState(ctx context.Context, res *models.DeploymentsSearchResponse, state *modelV0) diag.Diagnostics { + var diags diag.Diagnostics - if err := d.Set("return_count", res.ReturnCount); err != nil { - return err + if b, _ := res.MarshalBinary(); len(b) > 0 { + state.ID = types.String{Value: strconv.Itoa(schema.HashString(string(b)))} } + state.ReturnCount = types.Int64{Value: int64(*res.ReturnCount)} - var result = make([]interface{}, 0, len(res.Deployments)) + var result = make([]deploymentModelV0, 0, len(res.Deployments)) for _, deployment := range res.Deployments { - var m = make(map[string]interface{}) + var m deploymentModelV0 - m["deployment_id"] = *deployment.ID - m["alias"] = deployment.Alias + m.DeploymentID = types.String{Value: *deployment.ID} + m.Alias = types.String{Value: deployment.Alias} if deployment.Name != nil { - m["name"] = deployment.Name + m.Name = types.String{Value: *deployment.Name} } if len(deployment.Resources.Elasticsearch) > 0 { - m["elasticsearch_resource_id"] = *deployment.Resources.Elasticsearch[0].ID - m["elasticsearch_ref_id"] = *deployment.Resources.Elasticsearch[0].RefID + m.ElasticSearchResourceID = types.String{Value: *deployment.Resources.Elasticsearch[0].ID} + m.ElasticSearchRefID = types.String{Value: *deployment.Resources.Elasticsearch[0].RefID} } if len(deployment.Resources.Kibana) > 0 { - m["kibana_resource_id"] = *deployment.Resources.Kibana[0].ID - 
m["kibana_ref_id"] = *deployment.Resources.Kibana[0].RefID + m.KibanaResourceID = types.String{Value: *deployment.Resources.Kibana[0].ID} + m.KibanaRefID = types.String{Value: *deployment.Resources.Kibana[0].RefID} } if len(deployment.Resources.Apm) > 0 { - m["apm_resource_id"] = *deployment.Resources.Apm[0].ID - m["apm_ref_id"] = *deployment.Resources.Apm[0].RefID + m.ApmResourceID = types.String{Value: *deployment.Resources.Apm[0].ID} + m.ApmRefID = types.String{Value: *deployment.Resources.Apm[0].RefID} } if len(deployment.Resources.IntegrationsServer) > 0 { - m["integrations_server_resource_id"] = *deployment.Resources.IntegrationsServer[0].ID - m["integrations_server_ref_id"] = *deployment.Resources.IntegrationsServer[0].RefID + m.IntegrationsServerResourceID = types.String{Value: *deployment.Resources.IntegrationsServer[0].ID} + m.IntegrationsServerRefID = types.String{Value: *deployment.Resources.IntegrationsServer[0].RefID} } if len(deployment.Resources.EnterpriseSearch) > 0 { - m["enterprise_search_resource_id"] = *deployment.Resources.EnterpriseSearch[0].ID - m["enterprise_search_ref_id"] = *deployment.Resources.EnterpriseSearch[0].RefID + m.EnterpriseSearchResourceID = types.String{Value: *deployment.Resources.EnterpriseSearch[0].ID} + m.EnterpriseSearchRefID = types.String{Value: *deployment.Resources.EnterpriseSearch[0].RefID} } result = append(result, m) - if len(result) > 0 { - if err := d.Set("deployments", result); err != nil { - return err - } - } } - return nil + diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: deploymentAttrTypes(), + }, + }, &state.Deployments)...) 
+ + return diags } diff --git a/ec/ecdatasource/deploymentsdatasource/datasource_test.go b/ec/ecdatasource/deploymentsdatasource/datasource_test.go index 417edf53e..25eaa0f78 100644 --- a/ec/ecdatasource/deploymentsdatasource/datasource_test.go +++ b/ec/ecdatasource/deploymentsdatasource/datasource_test.go @@ -18,47 +18,52 @@ package deploymentsdatasource import ( + "context" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" "testing" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_modelToState(t *testing.T) { - deploymentsSchemaArg := schema.TestResourceDataRaw(t, newSchema(), nil) - deploymentsSchemaArg.SetId("myID") - _ = deploymentsSchemaArg.Set("name_prefix", "test") - _ = deploymentsSchemaArg.Set("healthy", "true") - _ = deploymentsSchemaArg.Set("deployment_template_id", "azure-compute-optimized") + state := modelV0{ + ID: types.String{Value: "test"}, + NamePrefix: types.String{Value: "test"}, + Healthy: types.String{Value: "true"}, + DeploymentTemplateID: types.String{Value: "azure-compute-optimized"}, + } - wantDeployments := util.NewResourceData(t, util.ResDataParams{ - ID: "myID", - State: map[string]interface{}{ - "id": "myID", - "name_prefix": "test", - "return_count": 1, - "deployment_template_id": "azure-compute-optimized", - "healthy": "true", - "deployments": []interface{}{map[string]interface{}{ - "name": "test-hello", - "alias": "dev", - "apm_resource_id": "9884c76ae1cd4521a0d9918a454a700d", - "apm_ref_id": "apm", - "deployment_id": "a8f22a9b9e684a7f94a89df74aa14331", - "elasticsearch_resource_id": "a98dd0dac15a48d5b3953384c7e571b9", - "elasticsearch_ref_id": "elasticsearch", - "enterprise_search_resource_id": "f17e4d8a61b14c12b020d85b723357ba", - 
"enterprise_search_ref_id": "enterprise_search", - "kibana_resource_id": "c75297d672b54da68faecededf372f87", - "kibana_ref_id": "kibana", + wantDeployments := modelV0{ + ID: types.String{Value: "2705093922"}, + NamePrefix: types.String{Value: "test"}, + ReturnCount: types.Int64{Value: 1}, + DeploymentTemplateID: types.String{Value: "azure-compute-optimized"}, + Healthy: types.String{Value: "true"}, + Deployments: types.List{ + ElemType: types.ObjectType{AttrTypes: deploymentAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: deploymentAttrTypes(), + Attrs: map[string]attr.Value{ + "name": types.String{Value: "test-hello"}, + "alias": types.String{Value: "dev"}, + "apm_resource_id": types.String{Value: "9884c76ae1cd4521a0d9918a454a700d"}, + "apm_ref_id": types.String{Value: "apm"}, + "deployment_id": types.String{Value: "a8f22a9b9e684a7f94a89df74aa14331"}, + "elasticsearch_resource_id": types.String{Value: "a98dd0dac15a48d5b3953384c7e571b9"}, + "elasticsearch_ref_id": types.String{Value: "elasticsearch"}, + "enterprise_search_resource_id": types.String{Value: "f17e4d8a61b14c12b020d85b723357ba"}, + "enterprise_search_ref_id": types.String{Value: "enterprise_search"}, + "kibana_resource_id": types.String{Value: "c75297d672b54da68faecededf372f87"}, + "kibana_ref_id": types.String{Value: "kibana"}, + "integrations_server_resource_id": types.String{Value: "3b3025a012fd3dd5c9dcae2a1ac89c6f"}, + "integrations_server_ref_id": types.String{Value: "integrations_server"}, + }, }}, }, - Schema: newSchema(), - }) + } searchResponse := &models.DeploymentsSearchResponse{ ReturnCount: ec.Int32(1), @@ -105,79 +110,47 @@ func Test_modelToState(t *testing.T) { RefID: ec.String("enterprise_search"), }, }, + IntegrationsServer: []*models.IntegrationsServerResourceInfo{ + { + ID: ec.String("3b3025a012fd3dd5c9dcae2a1ac89c6f"), + RefID: ec.String("integrations_server"), + }, + }, }, }, }, } - deploymentsSchemaArgNoID := schema.TestResourceDataRaw(t, newSchema(), nil) - 
deploymentsSchemaArgNoID.SetId("") - _ = deploymentsSchemaArgNoID.Set("name_prefix", "test") - _ = deploymentsSchemaArgNoID.Set("healthy", "true") - _ = deploymentsSchemaArgNoID.Set("deployment_template_id", "azure-compute-optimized") - - wantDeploymentsNoID := util.NewResourceData(t, util.ResDataParams{ - ID: "3825846481", - State: map[string]interface{}{ - "id": "myID", - "name_prefix": "test", - "return_count": 1, - "deployment_template_id": "azure-compute-optimized", - "healthy": "true", - "deployments": []interface{}{map[string]interface{}{ - "name": "test-hello", - "alias": "dev", - "apm_resource_id": "9884c76ae1cd4521a0d9918a454a700d", - "apm_ref_id": "apm", - "deployment_id": "a8f22a9b9e684a7f94a89df74aa14331", - "elasticsearch_resource_id": "a98dd0dac15a48d5b3953384c7e571b9", - "elasticsearch_ref_id": "elasticsearch", - "enterprise_search_resource_id": "f17e4d8a61b14c12b020d85b723357ba", - "enterprise_search_ref_id": "enterprise_search", - "kibana_resource_id": "c75297d672b54da68faecededf372f87", - "kibana_ref_id": "kibana", - }}, - }, - Schema: newSchema(), - }) - type args struct { - d *schema.ResourceData - res *models.DeploymentsSearchResponse + state modelV0 + res *models.DeploymentsSearchResponse } tests := []struct { - name string - args args - want *schema.ResourceData - err error + name string + args args + want modelV0 + diags error }{ { name: "flattens deployment resources", want: wantDeployments, args: args{ - d: deploymentsSchemaArg, - res: searchResponse, - }, - }, - { - name: "flattens deployment resources and sets the ID", - args: args{ - d: deploymentsSchemaArgNoID, - res: searchResponse, + state: state, + res: searchResponse, }, - want: wantDeploymentsNoID, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := modelToState(tt.args.d, tt.args.res) - if tt.err != nil || err != nil { - assert.EqualError(t, err, tt.err.Error()) + state = tt.args.state + diags := modelToState(context.Background(), tt.args.res, &state) + 
if tt.diags != nil { + assert.Equal(t, tt.diags, diags) } else { - assert.NoError(t, err) + assert.Empty(t, diags) } - assert.Equal(t, tt.want.State().Attributes, tt.args.d.State().Attributes) + assert.Equal(t, tt.want, state) }) } } diff --git a/ec/ecdatasource/deploymentsdatasource/expanders.go b/ec/ecdatasource/deploymentsdatasource/expanders.go index 01e5062a4..96f401c5c 100644 --- a/ec/ecdatasource/deploymentsdatasource/expanders.go +++ b/ec/ecdatasource/deploymentsdatasource/expanders.go @@ -18,18 +18,21 @@ package deploymentsdatasource import ( + "context" "fmt" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" ) // expandFilters expands all filters into a search request model -func expandFilters(d *schema.ResourceData) (*models.SearchRequest, error) { +func expandFilters(ctx context.Context, state modelV0) (*models.SearchRequest, diag.Diagnostics) { + var diags diag.Diagnostics var queries []*models.QueryContainer - namePrefix := d.Get("name_prefix").(string) + namePrefix := state.NamePrefix.Value if namePrefix != "" { queries = append(queries, &models.QueryContainer{ Prefix: map[string]models.PrefixQuery{ @@ -40,7 +43,7 @@ func expandFilters(d *schema.ResourceData) (*models.SearchRequest, error) { }) } - depTemplateID := d.Get("deployment_template_id").(string) + depTemplateID := state.DeploymentTemplateID.Value if depTemplateID != "" { esPath := "resources.elasticsearch" tplTermPath := esPath + ".info.plan_info.current.plan.deployment_template.id" @@ -48,10 +51,12 @@ func expandFilters(d *schema.ResourceData) (*models.SearchRequest, error) { queries = append(queries, newNestedTermQuery(esPath, tplTermPath, depTemplateID)) } - healthy := d.Get("healthy").(string) + healthy := state.Healthy.Value if 
healthy != "" { if healthy != "true" && healthy != "false" { - return nil, fmt.Errorf("invalid value for healthy (true|false): '%s'", healthy) + diags.AddError("invalid value for healthy", + fmt.Sprintf("invalid value for healthy (true|false): '%s'", healthy)) + return nil, diags } queries = append(queries, &models.QueryContainer{ @@ -61,11 +66,16 @@ func expandFilters(d *schema.ResourceData) (*models.SearchRequest, error) { }) } - tags := d.Get("tags").(map[string]interface{}) + var tags = make(map[string]string) + diags.Append(state.Tags.ElementsAs(ctx, &tags, false)...) + if diags.HasError() { + return nil, diags + } + var tagQueries []*models.QueryContainer for key, value := range tags { tagQueries = append(tagQueries, - newNestedTagQuery(key, value.(string)), + newNestedTagQuery(key, value), ) } if len(tagQueries) > 0 { @@ -76,20 +86,29 @@ func expandFilters(d *schema.ResourceData) (*models.SearchRequest, error) { }, }) } + type resourceFilter struct { + resourceKind string + settings *types.List + } - validResourceKinds := []string{util.Elasticsearch, util.Kibana, - util.Apm, util.EnterpriseSearch, util.IntegrationsServer} + resourceFilters := []resourceFilter{ + {resourceKind: util.Elasticsearch, settings: &state.Elasticsearch}, + {resourceKind: util.Kibana, settings: &state.Kibana}, + {resourceKind: util.Apm, settings: &state.Apm}, + {resourceKind: util.EnterpriseSearch, settings: &state.EnterpriseSearch}, + {resourceKind: util.IntegrationsServer, settings: &state.IntegrationsServer}, + } - for _, resourceKind := range validResourceKinds { - req, err := expandResourceFilters(d.Get(resourceKind).([]interface{}), resourceKind) - if err != nil { - return nil, err + for _, filter := range resourceFilters { + req, diags := expandResourceFilters(ctx, filter.settings, filter.resourceKind) + if diags.HasError() { + return nil, diags } queries = append(queries, req...) 
} searchReq := models.SearchRequest{ - Size: int32(d.Get("size").(int)), + Size: int32(state.Size.Value), Sort: []interface{}{"id"}, } @@ -111,42 +130,45 @@ func expandFilters(d *schema.ResourceData) (*models.SearchRequest, error) { } // expandResourceFilters expands filters from a specific resource kind into query models -func expandResourceFilters(resources []interface{}, resourceKind string) ([]*models.QueryContainer, error) { - if len(resources) == 0 { +func expandResourceFilters(ctx context.Context, resources *types.List, resourceKind string) ([]*models.QueryContainer, diag.Diagnostics) { + var diags diag.Diagnostics + if len(resources.Elems) == 0 { return nil, nil } - + var filters []resourceFiltersModelV0 var queries []*models.QueryContainer - - for _, raw := range resources { - var q = raw.(map[string]interface{}) - + diags.Append(resources.ElementsAs(ctx, &filters, false)...) + if diags.HasError() { + return nil, diags + } + for _, filter := range filters { resourceKindPath := "resources." + resourceKind - if status, ok := q["status"].(string); ok && status != "" { + if filter.Status.Value != "" { statusTermPath := resourceKindPath + ".info.status" queries = append(queries, - newNestedTermQuery(resourceKindPath, statusTermPath, status)) + newNestedTermQuery(resourceKindPath, statusTermPath, filter.Status.Value)) } - if version, ok := q["version"].(string); ok && version != "" { + if filter.Version.Value != "" { versionTermPath := resourceKindPath + ".info.plan_info.current.plan." 
+ resourceKind + ".version" queries = append(queries, - newNestedTermQuery(resourceKindPath, versionTermPath, version)) + newNestedTermQuery(resourceKindPath, versionTermPath, filter.Version.Value)) } - if healthy, ok := q["healthy"].(string); ok && healthy != "" { + if filter.Healthy.Value != "" { healthyTermPath := resourceKindPath + ".info.healthy" - if healthy != "true" && healthy != "false" { - return nil, fmt.Errorf("invalid value for healthy (true|false): '%s'", healthy) + if filter.Healthy.Value != "true" && filter.Healthy.Value != "false" { + diags.AddError("invalid value for healthy", fmt.Sprintf("invalid value for healthy (true|false): '%s'", filter.Healthy.Value)) + return nil, diags } queries = append(queries, - newNestedTermQuery(resourceKindPath, healthyTermPath, healthy)) + newNestedTermQuery(resourceKindPath, healthyTermPath, filter.Healthy.Value)) } } diff --git a/ec/ecdatasource/deploymentsdatasource/expanders_test.go b/ec/ecdatasource/deploymentsdatasource/expanders_test.go index 6410e1f4f..6727be188 100644 --- a/ec/ecdatasource/deploymentsdatasource/expanders_test.go +++ b/ec/ecdatasource/deploymentsdatasource/expanders_test.go @@ -18,41 +18,32 @@ package deploymentsdatasource import ( + "context" "encoding/json" - "errors" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" "testing" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_expandFilters(t *testing.T) { - deploymentsDS := util.NewResourceData(t, util.ResDataParams{ - ID: "myID", - State: newSampleFilters(), - Schema: newSchema(), - }) - invalidDS := util.NewResourceData(t, util.ResDataParams{ 
- ID: "myID", - State: newInvalidFilters(), - Schema: newSchema(), - }) type args struct { - d *schema.ResourceData + state modelV0 } tests := []struct { - name string - args args - want *models.SearchRequest - err error + name string + args args + want *models.SearchRequest + diags diag.Diagnostics }{ { name: "parses the data source", - args: args{d: deploymentsDS}, + args: args{state: newSampleFilters()}, want: &models.SearchRequest{ Size: 100, Sort: []interface{}{"id"}, @@ -71,38 +62,58 @@ func Test_expandFilters(t *testing.T) { }, { name: "parses the data source with a different size", - args: args{d: util.NewResourceData(t, util.ResDataParams{ - ID: "myID", - Schema: newSchema(), - State: map[string]interface{}{ - "name_prefix": "test", - "healthy": "true", - "size": 200, - "tags": map[string]interface{}{ - "foo": "bar", - }, - "elasticsearch": []interface{}{ - map[string]interface{}{ - "version": "7.9.1", - }, + args: args{ + state: modelV0{ + NamePrefix: types.String{Value: "test"}, + Healthy: types.String{Value: "true"}, + Size: types.Int64{Value: 200}, + Tags: util.StringMapAsType(map[string]string{"foo": "bar"}), + Elasticsearch: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(), + Attrs: map[string]attr.Value{ + "healthy": types.String{Null: true}, + "status": types.String{Null: true}, + "version": types.String{Value: "7.9.1"}, + }, + }}, }, - "kibana": []interface{}{ - map[string]interface{}{ - "status": "started", - }, + Kibana: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(), + Attrs: map[string]attr.Value{ + "healthy": types.String{Null: true}, + "status": types.String{Value: "started"}, + "version": types.String{Null: true}, + }, + }}, }, - "apm": []interface{}{ - map[string]interface{}{ - "healthy": "true", - }, + Apm: types.List{ + 
ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(), + Attrs: map[string]attr.Value{ + "healthy": types.String{Value: "true"}, + "status": types.String{Null: true}, + "version": types.String{Null: true}, + }, + }}, }, - "enterprise_search": []interface{}{ - map[string]interface{}{ - "healthy": "false", - }, + EnterpriseSearch: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(), + Attrs: map[string]attr.Value{ + "status": types.String{Null: true}, + "healthy": types.String{Value: "false"}, + "version": types.String{Null: true}, + }, + }}, }, }, - })}, + }, want: &models.SearchRequest{ Size: 200, Sort: []interface{}{"id"}, @@ -120,18 +131,18 @@ func Test_expandFilters(t *testing.T) { }, }, { - name: "fails to parse the data source", - args: args{d: invalidDS}, - err: errors.New("invalid value for healthy (true|false): 'invalid value'"), + name: "fails to parse the data source", + args: args{state: newInvalidFilters()}, + diags: diag.Diagnostics{diag.NewErrorDiagnostic("invalid value for healthy", "invalid value for healthy (true|false): 'invalid value'")}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := expandFilters(tt.args.d) - if tt.err != nil || err != nil { - assert.EqualError(t, err, tt.err.Error()) + got, diags := expandFilters(context.Background(), tt.args.state) + if tt.diags != nil { + assert.Equal(t, tt.diags, diags) } else { - assert.NoError(t, err) + assert.Empty(t, diags) } jsonWant, err := json.MarshalIndent(tt.want, "", " ") @@ -149,43 +160,72 @@ func Test_expandFilters(t *testing.T) { } } -func newInvalidFilters() map[string]interface{} { - return map[string]interface{}{ - "healthy": "invalid value", - "apm": []interface{}{ - map[string]interface{}{ - "healthy": "invalid value", - }, +func newInvalidFilters() modelV0 { + 
return modelV0{ + Healthy: types.String{Value: "invalid value"}, + Apm: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(), + Attrs: map[string]attr.Value{ + "healthy": types.String{Value: "invalid value"}, + }, + }}, }, } } -func newSampleFilters() map[string]interface{} { - return map[string]interface{}{ - "name_prefix": "test", - "healthy": "true", - "tags": map[string]interface{}{ - "foo": "bar", - }, - "elasticsearch": []interface{}{ - map[string]interface{}{ - "version": "7.9.1", - }, +func newSampleFilters() modelV0 { + return modelV0{ + NamePrefix: types.String{Value: "test"}, + Healthy: types.String{Value: "true"}, + Size: types.Int64{Value: 100}, + Tags: types.Map{ElemType: types.StringType, Elems: map[string]attr.Value{ + "foo": types.String{Value: "bar"}, + }}, + Elasticsearch: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(), + Attrs: map[string]attr.Value{ + "healthy": types.String{Null: true}, + "status": types.String{Null: true}, + "version": types.String{Value: "7.9.1"}, + }, + }}, }, - "kibana": []interface{}{ - map[string]interface{}{ - "status": "started", - }, + Kibana: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(), + Attrs: map[string]attr.Value{ + "healthy": types.String{Null: true}, + "status": types.String{Value: "started"}, + "version": types.String{Null: true}, + }, + }}, }, - "apm": []interface{}{ - map[string]interface{}{ - "healthy": "true", - }, + Apm: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(), + Attrs: map[string]attr.Value{ + "healthy": types.String{Value: "true"}, + "status": types.String{Null: 
true}, + "version": types.String{Null: true}, + }, + }}, }, - "enterprise_search": []interface{}{ - map[string]interface{}{ - "healthy": "false", - }, + EnterpriseSearch: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(), + Attrs: map[string]attr.Value{ + "status": types.String{Null: true}, + "healthy": types.String{Value: "false"}, + "version": types.String{Null: true}, + }, + }}, }, } } diff --git a/ec/ecdatasource/deploymentsdatasource/schema.go b/ec/ecdatasource/deploymentsdatasource/schema.go index 8504e9676..7275db68b 100644 --- a/ec/ecdatasource/deploymentsdatasource/schema.go +++ b/ec/ecdatasource/deploymentsdatasource/schema.go @@ -17,154 +17,144 @@ package deploymentsdatasource -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +import ( + "context" + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) -func newSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "name_prefix": { - Type: schema.TypeString, - Optional: true, - }, - "healthy": { - Type: schema.TypeString, - Optional: true, - }, - "deployment_template_id": { - Type: schema.TypeString, - Optional: true, - }, - "tags": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Default: 100, - }, - - // Computed - "return_count": { - Type: schema.TypeInt, - Computed: true, - }, - "deployments": { - Type: schema.TypeList, - Computed: true, - Elem: newDeploymentList(), - }, - - // Deployment resources - "elasticsearch": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: newResourceFilters(), 
- }, - "kibana": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: newResourceFilters(), - }, - "apm": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: newResourceFilters(), - }, - "integrations_server": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: newResourceFilters(), - }, - "enterprise_search": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: newResourceFilters(), - }, - } -} - -func newDeploymentList() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "deployment_id": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "alias": { - Type: schema.TypeString, - Computed: true, - }, - "elasticsearch_resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "elasticsearch_ref_id": { - Type: schema.TypeString, - Computed: true, - }, - "kibana_resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "kibana_ref_id": { - Type: schema.TypeString, - Computed: true, - }, - "apm_resource_id": { - Type: schema.TypeString, - Computed: true, +func (s DataSourceType) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { + return tfsdk.Schema{ + Attributes: map[string]tfsdk.Attribute{ + "name_prefix": { + Type: types.StringType, + Optional: true, }, - "apm_ref_id": { - Type: schema.TypeString, - Computed: true, + "healthy": { + Type: types.StringType, + Optional: true, }, - "integrations_server_resource_id": { - Type: schema.TypeString, - Computed: true, + "deployment_template_id": { + Type: types.StringType, + Optional: true, }, - "integrations_server_ref_id": { - Type: schema.TypeString, - Computed: true, + "tags": { + Type: types.MapType{ElemType: types.StringType}, + Optional: true, }, - "enterprise_search_resource_id": { - Type: schema.TypeString, - Computed: true, + "size": { + Type: types.Int64Type, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ 
+ planmodifier.DefaultValue(types.Int64{Value: 100}), + }, }, - "enterprise_search_ref_id": { - Type: schema.TypeString, + + // Computed + "id": { + Type: types.StringType, + Computed: true, + MarkdownDescription: "Unique identifier of this data source.", + }, + "return_count": { + Type: types.Int64Type, Computed: true, }, + "deployments": deploymentsListSchema(), + + // Deployment resources + "elasticsearch": resourceFiltersSchema(), + "kibana": resourceFiltersSchema(), + "apm": resourceFiltersSchema(), + "integrations_server": resourceFiltersSchema(), + "enterprise_search": resourceFiltersSchema(), }, + }, nil +} + +func deploymentsListSchema() tfsdk.Attribute { + // TODO should we use tfsdk.ListNestedAttributes here? - see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 + return tfsdk.Attribute{ + Computed: true, + Type: types.ListType{ElemType: types.ObjectType{ + AttrTypes: deploymentAttrTypes(), + }}, } } -func newResourceFilters() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "healthy": { - Type: schema.TypeString, - Optional: true, - }, - "status": { - Type: schema.TypeString, - Optional: true, - }, - "version": { - Type: schema.TypeString, - Optional: true, - }, - }, +func deploymentAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "deployment_id": types.StringType, + "name": types.StringType, + "alias": types.StringType, + "elasticsearch_resource_id": types.StringType, + "elasticsearch_ref_id": types.StringType, + "kibana_resource_id": types.StringType, + "kibana_ref_id": types.StringType, + "apm_resource_id": types.StringType, + "apm_ref_id": types.StringType, + "integrations_server_resource_id": types.StringType, + "integrations_server_ref_id": types.StringType, + "enterprise_search_resource_id": types.StringType, + "enterprise_search_ref_id": types.StringType, + } +} + +func resourceFiltersSchema() 
tfsdk.Attribute { + // TODO should we use tfsdk.ListNestedAttributes here? - see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 + return tfsdk.Attribute{ + Optional: true, + Type: types.ListType{ElemType: types.ObjectType{ + AttrTypes: resourceFiltersAttrTypes(), + }}, + } +} + +func resourceFiltersAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "healthy": types.StringType, + "status": types.StringType, + "version": types.StringType, } } + +type modelV0 struct { + ID types.String `tfsdk:"id"` + NamePrefix types.String `tfsdk:"name_prefix"` + Healthy types.String `tfsdk:"healthy"` + DeploymentTemplateID types.String `tfsdk:"deployment_template_id"` + Tags types.Map `tfsdk:"tags"` + Size types.Int64 `tfsdk:"size"` + ReturnCount types.Int64 `tfsdk:"return_count"` + Deployments types.List `tfsdk:"deployments"` //< deploymentModelV0 + Elasticsearch types.List `tfsdk:"elasticsearch"` //< resourceFiltersModelV0 + Kibana types.List `tfsdk:"kibana"` //< resourceFiltersModelV0 + Apm types.List `tfsdk:"apm"` //< resourceFiltersModelV0 + IntegrationsServer types.List `tfsdk:"integrations_server"` //< resourceFiltersModelV0 + EnterpriseSearch types.List `tfsdk:"enterprise_search"` //< resourceFiltersModelV0 +} + +type deploymentModelV0 struct { + DeploymentID types.String `tfsdk:"deployment_id"` + Name types.String `tfsdk:"name"` + Alias types.String `tfsdk:"alias"` + ElasticSearchResourceID types.String `tfsdk:"elasticsearch_resource_id"` + ElasticSearchRefID types.String `tfsdk:"elasticsearch_ref_id"` + KibanaResourceID types.String `tfsdk:"kibana_resource_id"` + KibanaRefID types.String `tfsdk:"kibana_ref_id"` + ApmResourceID types.String `tfsdk:"apm_resource_id"` + ApmRefID types.String `tfsdk:"apm_ref_id"` + IntegrationsServerResourceID types.String `tfsdk:"integrations_server_resource_id"` + IntegrationsServerRefID types.String 
`tfsdk:"integrations_server_ref_id"` + EnterpriseSearchResourceID types.String `tfsdk:"enterprise_search_resource_id"` + EnterpriseSearchRefID types.String `tfsdk:"enterprise_search_ref_id"` +} + +type resourceFiltersModelV0 struct { + Healthy types.String `tfsdk:"healthy"` + Status types.String `tfsdk:"status"` + Version types.String `tfsdk:"version"` +} diff --git a/ec/ecdatasource/stackdatasource/datasource.go b/ec/ecdatasource/stackdatasource/datasource.go index c84bd1861..f0fa5002e 100644 --- a/ec/ecdatasource/stackdatasource/datasource.go +++ b/ec/ecdatasource/stackdatasource/datasource.go @@ -20,64 +20,100 @@ package stackdatasource import ( "context" "fmt" - "regexp" - "strconv" - "time" - - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/stackapi" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/terraform-provider-ec/ec/internal" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "regexp" ) -// DataSource returns the ec_deployment data source schema. 
-func DataSource() *schema.Resource { - return &schema.Resource{ - ReadContext: read, +var _ provider.DataSourceType = (*DataSourceType)(nil) - Schema: newSchema(), +type DataSourceType struct{} - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(5 * time.Minute), - }, - } +func (s DataSourceType) NewDataSource(ctx context.Context, p provider.Provider) (datasource.DataSource, diag.Diagnostics) { + return &stackDataSource{ + p: p.(internal.Provider), + }, nil +} + +var _ datasource.DataSource = (*stackDataSource)(nil) + +type stackDataSource struct { + p internal.Provider } -func read(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) - region := d.Get("region").(string) +func (d stackDataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { + var newState modelV0 + response.Diagnostics.Append(request.Config.Get(ctx, &newState)...) + if response.Diagnostics.HasError() { + return + } res, err := stackapi.List(stackapi.ListParams{ - API: client, - Region: region, + API: d.p.GetClient(), + Region: newState.Region.Value, }) if err != nil { - return diag.FromErr( - multierror.NewPrefixed("failed retrieving the specified stack version", err), + response.Diagnostics.AddError( + "Failed retrieving the specified stack version", + fmt.Sprintf("Failed retrieving the specified stack version: %s", err), ) + return } - versionExpr := d.Get("version_regex").(string) - version := d.Get("version").(string) - lock := d.Get("lock").(bool) - stack, err := stackFromFilters(versionExpr, version, lock, res.Stacks) + stack, err := stackFromFilters(newState.VersionRegex.Value, newState.Version.Value, newState.Lock.Value, res.Stacks) if err != nil { - return diag.FromErr(err) + response.Diagnostics.AddError(err.Error(), err.Error()) + return } - if d.Id() == "" { - d.SetId(strconv.Itoa(schema.HashString(version))) + response.Diagnostics.Append(modelToState(ctx, stack, 
&newState)...) + if response.Diagnostics.HasError() { + return } - if err := modelToState(d, stack); err != nil { - diag.FromErr(err) + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) +} + +func modelToState(ctx context.Context, stack *models.StackVersionConfig, state *modelV0) diag.Diagnostics { + var diags diag.Diagnostics + + state.ID = types.String{Value: stack.Version} + state.Version = types.String{Value: stack.Version} + if stack.Accessible != nil { + state.Accessible = types.Bool{Value: *stack.Accessible} } - return nil + state.MinUpgradableFrom = types.String{Value: stack.MinUpgradableFrom} + + if len(stack.UpgradableTo) > 0 { + diags.Append(tfsdk.ValueFrom(ctx, stack.UpgradableTo, types.ListType{ElemType: types.StringType}, &state.UpgradableTo)...) + } + + if stack.Whitelisted != nil { + state.AllowListed = types.Bool{Value: *stack.Whitelisted} + } + + diags.Append(flattenStackVersionApmConfig(ctx, stack.Apm, &state.Apm)...) + diags.Append(flattenStackVersionElasticsearchConfig(ctx, stack.Elasticsearch, &state.Elasticsearch)...) + diags.Append(flattenStackVersionEnterpriseSearchConfig(ctx, stack.EnterpriseSearch, &state.EnterpriseSearch)...) + diags.Append(flattenStackVersionKibanaConfig(ctx, stack.Kibana, &state.Kibana)...) 
+ + return diags } +/* TODO - see https://github.com/multani/terraform-provider-camunda/pull/16/files +Timeouts: &schema.ResourceTimeout{ + Default: schema.DefaultTimeout(5 * time.Minute), +}, +*/ + func stackFromFilters(expr, version string, locked bool, stacks []*models.StackVersionConfig) (*models.StackVersionConfig, error) { if expr == "latest" && locked && version != "" { expr = version @@ -103,52 +139,11 @@ func stackFromFilters(expr, version string, locked bool, stacks []*models.StackV ) } -func modelToState(d *schema.ResourceData, stack *models.StackVersionConfig) error { - if stack == nil { - return nil - } - - if err := d.Set("version", stack.Version); err != nil { - return err +func newResourceKindConfigModelV0() resourceKindConfigModelV0 { + return resourceKindConfigModelV0{ + DenyList: types.List{ElemType: types.StringType}, + CompatibleNodeTypes: types.List{ElemType: types.StringType}, + Plugins: types.List{ElemType: types.StringType}, + DefaultPlugins: types.List{ElemType: types.StringType}, } - - if stack.Accessible != nil { - if err := d.Set("accessible", *stack.Accessible); err != nil { - return err - } - } - - if err := d.Set("min_upgradable_from", stack.MinUpgradableFrom); err != nil { - return err - } - - if len(stack.UpgradableTo) > 0 { - if err := d.Set("upgradable_to", stack.UpgradableTo); err != nil { - return err - } - } - - if stack.Whitelisted != nil { - if err := d.Set("allowlisted", *stack.Whitelisted); err != nil { - return err - } - } - - if err := d.Set("apm", flattenApmResources(stack.Apm)); err != nil { - return err - } - - if err := d.Set("elasticsearch", flattenElasticsearchResources(stack.Elasticsearch)); err != nil { - return err - } - - if err := d.Set("enterprise_search", flattenEnterpriseSearchResources(stack.EnterpriseSearch)); err != nil { - return err - } - - if err := d.Set("kibana", flattenKibanaResources(stack.Kibana)); err != nil { - return err - } - - return nil } diff --git 
a/ec/ecdatasource/stackdatasource/datasource_test.go b/ec/ecdatasource/stackdatasource/datasource_test.go index a86d71d56..f031550ef 100644 --- a/ec/ecdatasource/stackdatasource/datasource_test.go +++ b/ec/ecdatasource/stackdatasource/datasource_test.go @@ -18,46 +18,41 @@ package stackdatasource import ( + "context" "errors" "fmt" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" "regexp/syntax" "testing" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_modelToState(t *testing.T) { - deploymentSchemaArg := schema.TestResourceDataRaw(t, newSchema(), nil) - deploymentSchemaArg.SetId("someid") - _ = deploymentSchemaArg.Set("region", "us-east-1") - _ = deploymentSchemaArg.Set("version_regex", "latest") - - wantDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: "someid", - State: newSampleStack(), - Schema: newSchema(), - }) + state := modelV0{ + Region: types.String{Value: "us-east-1"}, + VersionRegex: types.String{Value: "latest"}, + } type args struct { - d *schema.ResourceData - res *models.StackVersionConfig + state modelV0 + res *models.StackVersionConfig } tests := []struct { name string args args - want *schema.ResourceData + want modelV0 err error }{ { - name: "flattens deployment resources", - want: wantDeployment, + name: "flattens stack resources", + want: newSampleStack(), args: args{ - d: deploymentSchemaArg, + state: state, res: &models.StackVersionConfig{ Version: "7.9.1", Accessible: ec.Bool(true), @@ -117,68 +112,106 @@ func Test_modelToState(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := modelToState(tt.args.d, tt.args.res) - if tt.err != nil { - 
assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } + state = tt.args.state + diags := modelToState(context.Background(), tt.args.res, &state) + assert.Empty(t, diags) - assert.Equal(t, tt.want.State().Attributes, tt.args.d.State().Attributes) + assert.Equal(t, tt.want, state) }) } } -func newSampleStack() map[string]interface{} { - return map[string]interface{}{ - "id": "someid", - "region": "us-east-1", - "version_regex": "latest", - - "version": "7.9.1", - "accessible": true, - "allowlisted": true, - "min_upgradable_from": "6.8.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "default_plugins": []interface{}{"repository-s3"}, - "docker_image": "docker.elastic.co/cloud-assets/elasticsearch:7.9.1-0", - "plugins": []interface{}{ - "analysis-icu", - "analysis-kuromoji", - "analysis-nori", - "analysis-phonetic", - "analysis-smartcn", - "analysis-stempel", - "analysis-ukrainian", - "ingest-attachment", - "mapper-annotated-text", - "mapper-murmur3", - "mapper-size", - "repository-azure", - "repository-gcs", +func newSampleStack() modelV0 { + return modelV0{ + ID: types.String{Value: "7.9.1"}, + Region: types.String{Value: "us-east-1"}, + Version: types.String{Value: "7.9.1"}, + VersionRegex: types.String{Value: "latest"}, + Accessible: types.Bool{Value: true}, + AllowListed: types.Bool{Value: true}, + MinUpgradableFrom: types.String{Value: "6.8.0"}, + Elasticsearch: types.List{ + ElemType: types.ObjectType{ + AttrTypes: resourceKindConfigAttrTypes(), + }, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceKindConfigAttrTypes(), + Attrs: map[string]attr.Value{ + "denylist": util.StringListAsType([]string{"some"}), + "capacity_constraints_max": types.Int64{Value: 8192}, + "capacity_constraints_min": types.Int64{Value: 512}, + "compatible_node_types": util.StringListAsType(nil), + "docker_image": types.String{Value: 
"docker.elastic.co/cloud-assets/elasticsearch:7.9.1-0"}, + "plugins": util.StringListAsType([]string{ + "analysis-icu", + "analysis-kuromoji", + "analysis-nori", + "analysis-phonetic", + "analysis-smartcn", + "analysis-stempel", + "analysis-ukrainian", + "ingest-attachment", + "mapper-annotated-text", + "mapper-murmur3", + "mapper-size", + "repository-azure", + "repository-gcs", + }), + "default_plugins": util.StringListAsType([]string{"repository-s3"}), + }, + }}, + }, + Kibana: types.List{ + ElemType: types.ObjectType{ + AttrTypes: resourceKindConfigAttrTypes(), }, - }}, - "kibana": []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "docker_image": "docker.elastic.co/cloud-assets/kibana:7.9.1-0", - }}, - "apm": []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "docker_image": "docker.elastic.co/cloud-assets/apm:7.9.1-0", - }}, - "enterprise_search": []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "docker_image": "docker.elastic.co/cloud-assets/enterprise_search:7.9.1-0", - }}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceKindConfigAttrTypes(), + Attrs: map[string]attr.Value{ + "denylist": util.StringListAsType([]string{"some"}), + "capacity_constraints_max": types.Int64{Value: 8192}, + "capacity_constraints_min": types.Int64{Value: 512}, + "compatible_node_types": util.StringListAsType(nil), + "docker_image": types.String{Value: "docker.elastic.co/cloud-assets/kibana:7.9.1-0"}, + "plugins": util.StringListAsType(nil), + "default_plugins": util.StringListAsType(nil), + }, + }}, + }, + EnterpriseSearch: types.List{ + ElemType: types.ObjectType{ + AttrTypes: resourceKindConfigAttrTypes(), + }, + Elems: []attr.Value{types.Object{ + AttrTypes: 
resourceKindConfigAttrTypes(), + Attrs: map[string]attr.Value{ + "denylist": util.StringListAsType([]string{"some"}), + "capacity_constraints_max": types.Int64{Value: 8192}, + "capacity_constraints_min": types.Int64{Value: 512}, + "compatible_node_types": util.StringListAsType(nil), + "docker_image": types.String{Value: "docker.elastic.co/cloud-assets/enterprise_search:7.9.1-0"}, + "plugins": util.StringListAsType(nil), + "default_plugins": util.StringListAsType(nil), + }, + }}, + }, + Apm: types.List{ + ElemType: types.ObjectType{ + AttrTypes: resourceKindConfigAttrTypes(), + }, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceKindConfigAttrTypes(), + Attrs: map[string]attr.Value{ + "denylist": util.StringListAsType([]string{"some"}), + "capacity_constraints_max": types.Int64{Value: 8192}, + "capacity_constraints_min": types.Int64{Value: 512}, + "compatible_node_types": util.StringListAsType(nil), + "docker_image": types.String{Value: "docker.elastic.co/cloud-assets/apm:7.9.1-0"}, + "plugins": util.StringListAsType(nil), + "default_plugins": util.StringListAsType(nil), + }, + }}, + }, } } diff --git a/ec/ecdatasource/stackdatasource/flatteners_apm.go b/ec/ecdatasource/stackdatasource/flatteners_apm.go index e85395309..68b3b9f78 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_apm.go +++ b/ec/ecdatasource/stackdatasource/flatteners_apm.go @@ -18,40 +18,53 @@ package stackdatasource import ( + "context" "github.com/elastic/cloud-sdk-go/pkg/models" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -// flattenApmResources takes in Apm resource models and returns its -// flattened form. 
-func flattenApmResources(res *models.StackVersionApmConfig) []interface{} { - var m = make(map[string]interface{}) +// flattenStackVersionApmConfig takes a StackVersionApmConfigs and flattens it. +func flattenStackVersionApmConfig(ctx context.Context, res *models.StackVersionApmConfig, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + model := newResourceKindConfigModelV0() + empty := true if res == nil { - return nil + return diags } if len(res.Blacklist) > 0 { - m["denylist"] = util.StringToItems(res.Blacklist...) + diags.Append(tfsdk.ValueFrom(ctx, res.Blacklist, types.ListType{ElemType: types.StringType}, &model.DenyList)...) + empty = false } if res.CapacityConstraints != nil { - m["capacity_constraints_max"] = int(*res.CapacityConstraints.Max) - m["capacity_constraints_min"] = int(*res.CapacityConstraints.Min) + model.CapacityConstraintsMax = types.Int64{Value: int64(*res.CapacityConstraints.Max)} + model.CapacityConstraintsMin = types.Int64{Value: int64(*res.CapacityConstraints.Min)} + empty = false } if len(res.CompatibleNodeTypes) > 0 { - m["compatible_node_types"] = res.CompatibleNodeTypes + diags.Append(tfsdk.ValueFrom(ctx, res.CompatibleNodeTypes, types.ListType{ElemType: types.StringType}, &model.CompatibleNodeTypes)...) + empty = false } if res.DockerImage != nil && *res.DockerImage != "" { - m["docker_image"] = *res.DockerImage + model.DockerImage = types.String{Value: *res.DockerImage} + empty = false } - if len(m) == 0 { - return nil + if empty { + return diags } - return []interface{}{m} + diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: resourceKindConfigAttrTypes(), + }, + }, target)...) 
+ + return diags } diff --git a/ec/ecdatasource/stackdatasource/flatteners_apm_test.go b/ec/ecdatasource/stackdatasource/flatteners_apm_test.go index bc82b7d28..e12515e61 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_apm_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_apm_test.go @@ -18,11 +18,14 @@ package stackdatasource import ( + "context" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" "testing" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_flattenApmResource(t *testing.T) { @@ -32,7 +35,7 @@ func Test_flattenApmResource(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []resourceKindConfigModelV0 }{ { name: "empty resource list returns empty list", @@ -54,17 +57,25 @@ func Test_flattenApmResource(t *testing.T) { }, DockerImage: ec.String("docker.elastic.co/cloud-assets/apm:7.9.1-0"), }}, - want: []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "docker_image": "docker.elastic.co/cloud-assets/apm:7.9.1-0", + want: []resourceKindConfigModelV0{{ + DenyList: util.StringListAsType([]string{"some"}), + CapacityConstraintsMax: types.Int64{Value: 8192}, + CapacityConstraintsMin: types.Int64{Value: 512}, + CompatibleNodeTypes: util.StringListAsType(nil), + DockerImage: types.String{Value: "docker.elastic.co/cloud-assets/apm:7.9.1-0"}, + Plugins: util.StringListAsType(nil), + DefaultPlugins: util.StringListAsType(nil), }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenApmResources(tt.args.res) + var newState modelV0 + diags := flattenStackVersionApmConfig(context.Background(), tt.args.res, &newState.Apm) + assert.Empty(t, diags) + + var got []resourceKindConfigModelV0 + 
newState.Apm.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go index 1210111e7..39e2908a3 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go +++ b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go @@ -18,48 +18,63 @@ package stackdatasource import ( + "context" "github.com/elastic/cloud-sdk-go/pkg/models" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -// flattenElasticsearchResources takes in Elasticsearch resource models and returns its -// flattened form. -func flattenElasticsearchResources(res *models.StackVersionElasticsearchConfig) []interface{} { - var m = make(map[string]interface{}) +// flattenStackVersionElasticsearchConfig takes a StackVersionElasticsearchConfig and flattens it. +func flattenStackVersionElasticsearchConfig(ctx context.Context, res *models.StackVersionElasticsearchConfig, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + model := newResourceKindConfigModelV0() + empty := true if res == nil { - return nil + return diags } if len(res.Blacklist) > 0 { - m["denylist"] = util.StringToItems(res.Blacklist...) + diags.Append(tfsdk.ValueFrom(ctx, res.Blacklist, types.ListType{ElemType: types.StringType}, &model.DenyList)...) 
+ empty = false } if res.CapacityConstraints != nil { - m["capacity_constraints_max"] = int(*res.CapacityConstraints.Max) - m["capacity_constraints_min"] = int(*res.CapacityConstraints.Min) + model.CapacityConstraintsMax = types.Int64{Value: int64(*res.CapacityConstraints.Max)} + model.CapacityConstraintsMin = types.Int64{Value: int64(*res.CapacityConstraints.Min)} + empty = false } if len(res.CompatibleNodeTypes) > 0 { - m["compatible_node_types"] = util.StringToItems(res.CompatibleNodeTypes...) + diags.Append(tfsdk.ValueFrom(ctx, res.CompatibleNodeTypes, types.ListType{ElemType: types.StringType}, &model.CompatibleNodeTypes)...) + empty = false } if res.DockerImage != nil && *res.DockerImage != "" { - m["docker_image"] = *res.DockerImage + model.DockerImage = types.String{Value: *res.DockerImage} + empty = false } if len(res.Plugins) > 0 { - m["plugins"] = util.StringToItems(res.Plugins...) + diags.Append(tfsdk.ValueFrom(ctx, res.Plugins, types.ListType{ElemType: types.StringType}, &model.Plugins)...) + empty = false } if len(res.DefaultPlugins) > 0 { - m["default_plugins"] = util.StringToItems(res.DefaultPlugins...) + diags.Append(tfsdk.ValueFrom(ctx, res.DefaultPlugins, types.ListType{ElemType: types.StringType}, &model.DefaultPlugins)...) + empty = false } - if len(m) == 0 { - return nil + if empty { + return diags } - return []interface{}{m} + diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: resourceKindConfigAttrTypes(), + }, + }, target)...) 
+ + return diags } diff --git a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go index e6448f486..6643db967 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go @@ -18,21 +18,24 @@ package stackdatasource import ( + "context" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" "testing" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) -func Test_flattenElasticsearchResources(t *testing.T) { +func Test_flattenElasticsearchResource(t *testing.T) { type args struct { res *models.StackVersionElasticsearchConfig } tests := []struct { name string args args - want []interface{} + want []resourceKindConfigModelV0 }{ { name: "empty resource list returns empty list", @@ -70,13 +73,13 @@ func Test_flattenElasticsearchResources(t *testing.T) { "repository-gcs", }, }}, - want: []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "default_plugins": []interface{}{"repository-s3"}, - "docker_image": "docker.elastic.co/cloud-assets/elasticsearch:7.9.1-0", - "plugins": []interface{}{ + want: []resourceKindConfigModelV0{{ + DenyList: util.StringListAsType([]string{"some"}), + CapacityConstraintsMax: types.Int64{Value: 8192}, + CapacityConstraintsMin: types.Int64{Value: 512}, + CompatibleNodeTypes: util.StringListAsType(nil), + DockerImage: types.String{Value: "docker.elastic.co/cloud-assets/elasticsearch:7.9.1-0"}, + Plugins: util.StringListAsType([]string{ "analysis-icu", "analysis-kuromoji", "analysis-nori", @@ -90,13 +93,19 @@ func Test_flattenElasticsearchResources(t *testing.T) { "mapper-size", "repository-azure", 
"repository-gcs", - }, + }), + DefaultPlugins: util.StringListAsType([]string{"repository-s3"}), }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenElasticsearchResources(tt.args.res) + var newState modelV0 + diags := flattenStackVersionElasticsearchConfig(context.Background(), tt.args.res, &newState.Elasticsearch) + assert.Empty(t, diags) + + var got []resourceKindConfigModelV0 + newState.Elasticsearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go index d20ca04f5..fb34b449b 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go +++ b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go @@ -18,40 +18,53 @@ package stackdatasource import ( + "context" "github.com/elastic/cloud-sdk-go/pkg/models" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -// flattenEnterpriseSearchResources takes in EnterpriseSearch resource models and returns its -// flattened form. -func flattenEnterpriseSearchResources(res *models.StackVersionEnterpriseSearchConfig) []interface{} { - var m = make(map[string]interface{}) +// flattenStackVersionEnterpriseSearchConfig takes a StackVersionEnterpriseSearchConfig and flattens it. +func flattenStackVersionEnterpriseSearchConfig(ctx context.Context, res *models.StackVersionEnterpriseSearchConfig, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + model := newResourceKindConfigModelV0() + empty := true if res == nil { - return nil + return diags } if len(res.Blacklist) > 0 { - m["denylist"] = util.StringToItems(res.Blacklist...) 
+ diags.Append(tfsdk.ValueFrom(ctx, res.Blacklist, types.ListType{ElemType: types.StringType}, &model.DenyList)...) + empty = false } if res.CapacityConstraints != nil { - m["capacity_constraints_max"] = int(*res.CapacityConstraints.Max) - m["capacity_constraints_min"] = int(*res.CapacityConstraints.Min) + model.CapacityConstraintsMax = types.Int64{Value: int64(*res.CapacityConstraints.Max)} + model.CapacityConstraintsMin = types.Int64{Value: int64(*res.CapacityConstraints.Min)} + empty = false } if len(res.CompatibleNodeTypes) > 0 { - m["compatible_node_types"] = res.CompatibleNodeTypes + diags.Append(tfsdk.ValueFrom(ctx, res.CompatibleNodeTypes, types.ListType{ElemType: types.StringType}, &model.CompatibleNodeTypes)...) + empty = false } if res.DockerImage != nil && *res.DockerImage != "" { - m["docker_image"] = *res.DockerImage + model.DockerImage = types.String{Value: *res.DockerImage} + empty = false } - if len(m) == 0 { - return nil + if empty { + return diags } - return []interface{}{m} + diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: resourceKindConfigAttrTypes(), + }, + }, target)...) 
+ + return diags } diff --git a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go index 70195b5d3..a781c3284 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go @@ -18,11 +18,14 @@ package stackdatasource import ( + "context" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" "testing" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_flattenEnterpriseSearchResources(t *testing.T) { @@ -32,7 +35,7 @@ func Test_flattenEnterpriseSearchResources(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []resourceKindConfigModelV0 }{ { name: "empty resource list returns empty list", @@ -54,17 +57,25 @@ func Test_flattenEnterpriseSearchResources(t *testing.T) { }, DockerImage: ec.String("docker.elastic.co/cloud-assets/enterprise_search:7.9.1-0"), }}, - want: []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "docker_image": "docker.elastic.co/cloud-assets/enterprise_search:7.9.1-0", + want: []resourceKindConfigModelV0{{ + DenyList: util.StringListAsType([]string{"some"}), + CapacityConstraintsMax: types.Int64{Value: 8192}, + CapacityConstraintsMin: types.Int64{Value: 512}, + CompatibleNodeTypes: util.StringListAsType(nil), + DockerImage: types.String{Value: "docker.elastic.co/cloud-assets/enterprise_search:7.9.1-0"}, + Plugins: util.StringListAsType(nil), + DefaultPlugins: util.StringListAsType(nil), }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenEnterpriseSearchResources(tt.args.res) + var newState modelV0 + diags := 
flattenStackVersionEnterpriseSearchConfig(context.Background(), tt.args.res, &newState.EnterpriseSearch) + assert.Empty(t, diags) + + var got []resourceKindConfigModelV0 + newState.EnterpriseSearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecdatasource/stackdatasource/flatteners_kibana.go b/ec/ecdatasource/stackdatasource/flatteners_kibana.go index 5401df150..63dfe6680 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_kibana.go +++ b/ec/ecdatasource/stackdatasource/flatteners_kibana.go @@ -18,40 +18,53 @@ package stackdatasource import ( + "context" "github.com/elastic/cloud-sdk-go/pkg/models" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -// flattenKibanaResources takes in Kibana resource models and returns its -// flattened form. -func flattenKibanaResources(res *models.StackVersionKibanaConfig) []interface{} { - var m = make(map[string]interface{}) +// flattenStackVersionKibanaConfig takes a StackVersionKibanaConfig and flattens it. +func flattenStackVersionKibanaConfig(ctx context.Context, res *models.StackVersionKibanaConfig, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + model := newResourceKindConfigModelV0() + empty := true if res == nil { - return nil + return diags } if len(res.Blacklist) > 0 { - m["denylist"] = util.StringToItems(res.Blacklist...) + diags.Append(tfsdk.ValueFrom(ctx, res.Blacklist, types.ListType{ElemType: types.StringType}, &model.DenyList)...) 
+ empty = false } if res.CapacityConstraints != nil { - m["capacity_constraints_max"] = int(*res.CapacityConstraints.Max) - m["capacity_constraints_min"] = int(*res.CapacityConstraints.Min) + model.CapacityConstraintsMax = types.Int64{Value: int64(*res.CapacityConstraints.Max)} + model.CapacityConstraintsMin = types.Int64{Value: int64(*res.CapacityConstraints.Min)} + empty = false } if len(res.CompatibleNodeTypes) > 0 { - m["compatible_node_types"] = util.StringToItems(res.CompatibleNodeTypes...) + diags.Append(tfsdk.ValueFrom(ctx, res.CompatibleNodeTypes, types.ListType{ElemType: types.StringType}, &model.CompatibleNodeTypes)...) + empty = false } if res.DockerImage != nil && *res.DockerImage != "" { - m["docker_image"] = *res.DockerImage + model.DockerImage = types.String{Value: *res.DockerImage} + empty = false } - if len(m) == 0 { - return nil + if empty { + return diags } - return []interface{}{m} + diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: resourceKindConfigAttrTypes(), + }, + }, target)...) 
+ + return diags } diff --git a/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go b/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go index 74914db5e..b9ca34ec8 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go @@ -18,11 +18,14 @@ package stackdatasource import ( + "context" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" "testing" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_flattenKibanaResources(t *testing.T) { @@ -32,7 +35,7 @@ func Test_flattenKibanaResources(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []resourceKindConfigModelV0 }{ { name: "empty resource list returns empty list", @@ -54,17 +57,25 @@ func Test_flattenKibanaResources(t *testing.T) { }, DockerImage: ec.String("docker.elastic.co/cloud-assets/kibana:7.9.1-0"), }}, - want: []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "docker_image": "docker.elastic.co/cloud-assets/kibana:7.9.1-0", + want: []resourceKindConfigModelV0{{ + DenyList: util.StringListAsType([]string{"some"}), + CapacityConstraintsMax: types.Int64{Value: 8192}, + CapacityConstraintsMin: types.Int64{Value: 512}, + CompatibleNodeTypes: util.StringListAsType(nil), + DockerImage: types.String{Value: "docker.elastic.co/cloud-assets/kibana:7.9.1-0"}, + Plugins: util.StringListAsType(nil), + DefaultPlugins: util.StringListAsType(nil), }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenKibanaResources(tt.args.res) + var newState modelV0 + diags := flattenStackVersionKibanaConfig(context.Background(), tt.args.res, &newState.Kibana) + assert.Empty(t, diags) + + var 
got []resourceKindConfigModelV0 + newState.Kibana.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecdatasource/stackdatasource/schema.go b/ec/ecdatasource/stackdatasource/schema.go index 79621176c..df15bc0dd 100644 --- a/ec/ecdatasource/stackdatasource/schema.go +++ b/ec/ecdatasource/stackdatasource/schema.go @@ -18,103 +18,112 @@ package stackdatasource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "context" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -func newSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "version_regex": { - Type: schema.TypeString, - Required: true, - }, - "region": { - Type: schema.TypeString, - Required: true, - }, - "lock": { - Type: schema.TypeBool, - Optional: true, - }, - - // Exported attributes - "version": { - Type: schema.TypeString, - Computed: true, - }, - "accessible": { - Type: schema.TypeBool, - Computed: true, - }, - "min_upgradable_from": { - Type: schema.TypeString, - Computed: true, - }, - "upgradable_to": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "allowlisted": { - Type: schema.TypeBool, - Computed: true, - }, - - "apm": newKindResourceSchema(), - "enterprise_search": newKindResourceSchema(), - "elasticsearch": newKindResourceSchema(), - "kibana": newKindResourceSchema(), - } -} - -func newKindResourceSchema() *schema.Schema { - return &schema.Schema{ - Computed: true, - Type: schema.TypeList, - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "denylist": { - Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, +func (s DataSourceType) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { + return tfsdk.Schema{ + 
Attributes: map[string]tfsdk.Attribute{ + "version_regex": { + Type: types.StringType, + Required: true, }, - "capacity_constraints_max": { - Type: schema.TypeInt, - Computed: true, + "region": { + Type: types.StringType, + Required: true, }, - "capacity_constraints_min": { - Type: schema.TypeInt, + "lock": { + Type: types.BoolType, + Optional: true, + }, + + // Computed attributes + "id": { + Type: types.StringType, + Computed: true, + MarkdownDescription: "Unique identifier of this data source.", + }, + "version": { + Type: types.StringType, Computed: true, }, - "compatible_node_types": { + "accessible": { + Type: types.BoolType, Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, }, - "docker_image": { - Type: schema.TypeString, + "min_upgradable_from": { + Type: types.StringType, Computed: true, }, - "plugins": { + "upgradable_to": { + Type: types.ListType{ElemType: types.StringType}, Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, }, - "default_plugins": { + "allowlisted": { + Type: types.BoolType, Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, }, - // node_types not added. It is highly unlikely they will be used - // for anything, and if they're needed in the future, then we can - // invest on adding them. + "apm": kindResourceSchema(), + "enterprise_search": kindResourceSchema(), + "elasticsearch": kindResourceSchema(), + "kibana": kindResourceSchema(), + }, + }, nil +} + +func kindResourceSchema() tfsdk.Attribute { + // TODO should we use tfsdk.ListNestedAttributes here? 
- see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 + return tfsdk.Attribute{ + Computed: true, + Type: types.ListType{ElemType: types.ObjectType{ + AttrTypes: resourceKindConfigAttrTypes(), }}, } } + +func resourceKindConfigAttrTypes() map[string]attr.Type { + return map[string]attr.Type{ + "denylist": types.ListType{ElemType: types.StringType}, + "capacity_constraints_max": types.Int64Type, + "capacity_constraints_min": types.Int64Type, + "compatible_node_types": types.ListType{ElemType: types.StringType}, + "docker_image": types.StringType, + "plugins": types.ListType{ElemType: types.StringType}, + "default_plugins": types.ListType{ElemType: types.StringType}, + + // node_types not added. It is highly unlikely they will be used + // for anything, and if they're needed in the future, then we can + // invest on adding them. + } +} + +type modelV0 struct { + ID types.String `tfsdk:"id"` + VersionRegex types.String `tfsdk:"version_regex"` + Region types.String `tfsdk:"region"` + Lock types.Bool `tfsdk:"lock"` + Version types.String `tfsdk:"version"` + Accessible types.Bool `tfsdk:"accessible"` + MinUpgradableFrom types.String `tfsdk:"min_upgradable_from"` + UpgradableTo types.List `tfsdk:"upgradable_to"` + AllowListed types.Bool `tfsdk:"allowlisted"` + Apm types.List `tfsdk:"apm"` //< resourceKindConfigModelV0 + EnterpriseSearch types.List `tfsdk:"enterprise_search"` //< resourceKindConfigModelV0 + Elasticsearch types.List `tfsdk:"elasticsearch"` //< resourceKindConfigModelV0 + Kibana types.List `tfsdk:"kibana"` //< resourceKindConfigModelV0 +} + +type resourceKindConfigModelV0 struct { + DenyList types.List `tfsdk:"denylist"` + CapacityConstraintsMax types.Int64 `tfsdk:"capacity_constraints_max"` + CapacityConstraintsMin types.Int64 `tfsdk:"capacity_constraints_min"` + CompatibleNodeTypes types.List `tfsdk:"compatible_node_types"` + DockerImage types.String 
`tfsdk:"docker_image"` + Plugins types.List `tfsdk:"plugins"` + DefaultPlugins types.List `tfsdk:"default_plugins"` +} diff --git a/ec/internal/planmodifier/default_from_env.go b/ec/internal/planmodifier/default_from_env.go new file mode 100644 index 000000000..04de4cc43 --- /dev/null +++ b/ec/internal/planmodifier/default_from_env.go @@ -0,0 +1,45 @@ +package planmodifier + +import ( + "context" + "fmt" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// defaultFromEnvAttributePlanModifier specifies a default value (attr.Value) for an attribute. +type defaultFromEnvAttributePlanModifier struct { + EnvKeys []string +} + +// DefaultFromEnv is a helper to instantiate a defaultFromEnvAttributePlanModifier. +func DefaultFromEnv(envKeys []string) tfsdk.AttributePlanModifier { + return &defaultFromEnvAttributePlanModifier{envKeys} +} + +var _ tfsdk.AttributePlanModifier = (*defaultFromEnvAttributePlanModifier)(nil) + +func (m *defaultFromEnvAttributePlanModifier) Description(ctx context.Context) string { + return m.MarkdownDescription(ctx) +} + +func (m *defaultFromEnvAttributePlanModifier) MarkdownDescription(ctx context.Context) string { + return fmt.Sprintf("Sets the default value from an environment variable (%v) if the attribute is not set", m.EnvKeys) +} + +func (m *defaultFromEnvAttributePlanModifier) Modify(_ context.Context, req tfsdk.ModifyAttributePlanRequest, res *tfsdk.ModifyAttributePlanResponse) { + // If the attribute configuration is not null, we are done here + if !req.AttributeConfig.IsNull() { + return + } + + // If the attribute plan is "known" and "not null", then a previous plan m in the sequence + // has already been applied, and we don't want to interfere. 
+ if !req.AttributePlan.IsUnknown() && !req.AttributePlan.IsNull() { + return + } + + res.AttributePlan = types.String{Value: util.MultiGetenv(m.EnvKeys, "")} +} diff --git a/ec/internal/planmodifier/default_value.go b/ec/internal/planmodifier/default_value.go new file mode 100644 index 000000000..6f3b8e4b9 --- /dev/null +++ b/ec/internal/planmodifier/default_value.go @@ -0,0 +1,45 @@ +// NOTE! copied from terraform-provider-tls +package planmodifier + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// defaultValueAttributePlanModifier specifies a default value (attr.Value) for an attribute. +type defaultValueAttributePlanModifier struct { + DefaultValue attr.Value +} + +// DefaultValue is a helper to instantiate a defaultValueAttributePlanModifier. +func DefaultValue(v attr.Value) tfsdk.AttributePlanModifier { + return &defaultValueAttributePlanModifier{v} +} + +var _ tfsdk.AttributePlanModifier = (*defaultValueAttributePlanModifier)(nil) + +func (m *defaultValueAttributePlanModifier) Description(ctx context.Context) string { + return m.MarkdownDescription(ctx) +} + +func (m *defaultValueAttributePlanModifier) MarkdownDescription(ctx context.Context) string { + return fmt.Sprintf("Sets the default value %q (%s) if the attribute is not set", m.DefaultValue, m.DefaultValue.Type(ctx)) +} + +func (m *defaultValueAttributePlanModifier) Modify(_ context.Context, req tfsdk.ModifyAttributePlanRequest, res *tfsdk.ModifyAttributePlanResponse) { + // If the attribute configuration is not null, we are done here + if !req.AttributeConfig.IsNull() { + return + } + + // If the attribute plan is "known" and "not null", then a previous plan m in the sequence + // has already been applied, and we don't want to interfere. 
+ if !req.AttributePlan.IsUnknown() && !req.AttributePlan.IsNull() { + return + } + + res.AttributePlan = m.DefaultValue +} diff --git a/ec/internal/provider.go b/ec/internal/provider.go new file mode 100644 index 000000000..b407dd1ca --- /dev/null +++ b/ec/internal/provider.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package internal + +import ( + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/hashicorp/terraform-plugin-framework/provider" +) + +// Provider is an interface required to avoid import cycles in datasource / resource packages +type Provider interface { + provider.Provider + GetClient() *api.API +} diff --git a/ec/internal/util/helpers.go b/ec/internal/util/helpers.go index 98625cee5..d00952da5 100644 --- a/ec/internal/util/helpers.go +++ b/ec/internal/util/helpers.go @@ -19,6 +19,11 @@ package util import ( "fmt" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "os" + "strconv" "github.com/elastic/cloud-sdk-go/pkg/models" ) @@ -73,3 +78,64 @@ func IsCurrentEssPlanEmpty(res *models.EnterpriseSearchResourceInfo) bool { var emptyPlanInfo = res.Info == nil || res.Info.PlanInfo == nil || res.Info.PlanInfo.Current == nil return emptyPlanInfo || res.Info.PlanInfo.Current.Plan == nil } + +// MultiGetenv returns the value of the first environment variable in the +// given list that has a non-empty value. If none of the environment +// variables have a value, the default value is returned. 
+func MultiGetenv(keys []string, defaultValue string) string { + for _, key := range keys { + if value := os.Getenv(key); value != "" { + return value + } + } + return defaultValue +} + +func GetStringFromSchemaOrEnv(d *schema.ResourceData, key string, envKeys []string, defaultValue string) string { + if value, ok := d.GetOk(key); ok { + return value.(string) + } + return MultiGetenv(envKeys, defaultValue) +} +func GetBoolFromSchemaOrEnv(d *schema.ResourceData, key string, envKeys []string) bool { + if value, ok := d.GetOk(key); ok { + return value.(bool) + } + + strValue := MultiGetenv(envKeys, "false") + value, err := StringToBool(strValue) + if err != nil { + return false + } + return value +} + +func StringToBool(str string) (bool, error) { + if str == "" { + return false, nil + } + + v, err := strconv.ParseBool(str) + if err != nil { + return false, err + } + + return v, nil +} + +func StringListAsType(in []string) types.List { + //goland:noinspection GoPreferNilSlice + out := []attr.Value{} + for _, value := range in { + out = append(out, types.String{Value: value}) + } + return types.List{ElemType: types.StringType, Elems: out} +} +func StringMapAsType(in map[string]string) types.Map { + //goland:noinspection GoPreferNilSlice + out := make(map[string]attr.Value, len(in)) + for key, value := range in { + out[key] = types.String{Value: value} + } + return types.Map{ElemType: types.StringType, Elems: out} +} diff --git a/ec/internal/validators/knownvalidator.go b/ec/internal/validators/knownvalidator.go new file mode 100644 index 000000000..82593e66c --- /dev/null +++ b/ec/internal/validators/knownvalidator.go @@ -0,0 +1,40 @@ +package validators + +import ( + "context" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +type knownValidator struct{} + +// Description returns a plain text description of the validator's behavior, suitable for a practitioner to understand its impact. 
+func (v knownValidator) Description(ctx context.Context) string { + return "Value must be known" +} + +// MarkdownDescription returns a markdown formatted description of the validator's behavior, suitable for a practitioner to understand its impact. +func (v knownValidator) MarkdownDescription(ctx context.Context) string { + return v.Description(ctx) +} + +// Validate runs the main validation logic of the validator, reading configuration data out of `req` and updating `resp` with diagnostics. +func (v knownValidator) Validate(ctx context.Context, req tfsdk.ValidateAttributeRequest, resp *tfsdk.ValidateAttributeResponse) { + if req.AttributeConfig.IsUnknown() { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + v.Description(ctx), + "Value must be known", + ) + return + } +} + +// Known returns an AttributeValidator which ensures that any configured +// attribute value: +// +// - Is known. +// +// Null (unconfigured) values are skipped. +func Known() tfsdk.AttributeValidator { + return knownValidator{} +} diff --git a/ec/internal/validators/urlvalidator.go b/ec/internal/validators/urlvalidator.go new file mode 100644 index 000000000..a66f44d34 --- /dev/null +++ b/ec/internal/validators/urlvalidator.go @@ -0,0 +1,83 @@ +package validators + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "golang.org/x/exp/slices" + "net/url" + "strings" +) + +type isURLWithSchemeValidator struct { + ValidSchemes []string +} + +// Description returns a plain text description of the validator's behavior, suitable for a practitioner to understand its impact. 
+func (v isURLWithSchemeValidator) Description(ctx context.Context) string { + return fmt.Sprintf("Value must be a valid URL with scheme (%s)", strings.Join(v.ValidSchemes, ", ")) +} + +// MarkdownDescription returns a markdown formatted description of the validator's behavior, suitable for a practitioner to understand its impact. +func (v isURLWithSchemeValidator) MarkdownDescription(ctx context.Context) string { + return v.Description(ctx) +} + +// Validate runs the main validation logic of the validator, reading configuration data out of `req` and updating `resp` with diagnostics. +func (v isURLWithSchemeValidator) Validate(ctx context.Context, req tfsdk.ValidateAttributeRequest, resp *tfsdk.ValidateAttributeResponse) { + // types.String must be the attr.Value produced by the attr.Type in the schema for this attribute + // for generic validators, use + // https://pkg.go.dev/github.com/hashicorp/terraform-plugin-framework/tfsdk#ConvertValue + // to convert into a known type. + var str types.String + diags := tfsdk.ValueAs(ctx, req.AttributeConfig, &str) + resp.Diagnostics.Append(diags...) 
+ if diags.HasError() { + return + } + + if str.Unknown || str.Null { + return + } + + if str.Value == "" { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + v.Description(ctx), + fmt.Sprintf("URL must not be empty, got %v.", str), + ) + return + } + + u, err := url.Parse(str.Value) + if err != nil { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + v.Description(ctx), + fmt.Sprintf("URL is invalid, got %v: %+v", str.Value, err), + ) + return + } + + if u.Host == "" { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + v.Description(ctx), + fmt.Sprintf("URL is missing host, got %v", str.Value), + ) + return + } + + if !slices.Contains(v.ValidSchemes, u.Scheme) { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + v.Description(ctx), + fmt.Sprintf("URL is expected to have a valid scheme, got %v (%v)", u.Scheme, str.Value), + ) + } +} + +func IsURLWithSchemeValidator(validSchemes []string) tfsdk.AttributeValidator { + return isURLWithSchemeValidator{} +} diff --git a/ec/provider.go b/ec/provider.go index b88f654bf..917474531 100644 --- a/ec/provider.go +++ b/ec/provider.go @@ -18,13 +18,8 @@ package ec import ( + "context" "fmt" - "time" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/elastic/terraform-provider-ec/ec/ecdatasource/deploymentdatasource" "github.com/elastic/terraform-provider-ec/ec/ecdatasource/deploymentsdatasource" "github.com/elastic/terraform-provider-ec/ec/ecdatasource/stackdatasource" @@ -33,6 +28,19 @@ import ( "github.com/elastic/terraform-provider-ec/ec/ecresource/extensionresource" "github.com/elastic/terraform-provider-ec/ec/ecresource/trafficfilterassocresource" "github.com/elastic/terraform-provider-ec/ec/ecresource/trafficfilterresource" + "github.com/elastic/terraform-provider-ec/ec/internal" + 
"github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/elastic/terraform-provider-ec/ec/internal/validators" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "time" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) const ( @@ -59,16 +67,12 @@ var ( defaultTimeout = 40 * time.Second ) -// Provider returns a schema.Provider. -func Provider() *schema.Provider { +// LegacyProvider returns a schema.Provider. +func LegacyProvider() *schema.Provider { return &schema.Provider{ ConfigureContextFunc: configureAPI, Schema: newSchema(), - DataSourcesMap: map[string]*schema.Resource{ - "ec_deployment": deploymentdatasource.DataSource(), - "ec_deployments": deploymentsdatasource.DataSource(), - "ec_stack": stackdatasource.DataSource(), - }, + DataSourcesMap: map[string]*schema.Resource{}, ResourcesMap: map[string]*schema.Resource{ "ec_deployment": deploymentresource.Resource(), "ec_deployment_elasticsearch_keystore": elasticsearchkeystoreresource.Resource(), @@ -80,84 +84,276 @@ func Provider() *schema.Provider { } func newSchema() map[string]*schema.Schema { + // This schema must match exactly the Terraform Protocol v6 (Terraform Plugin Framework) provider's schema. + // Notably the attributes can have no Default values. 
return map[string]*schema.Schema{ "endpoint": { Description: fmt.Sprintf(endpointDesc, api.ESSEndpoint), Type: schema.TypeString, Optional: true, ValidateFunc: validation.IsURLWithScheme(validURLSchemes), - DefaultFunc: schema.MultiEnvDefaultFunc( - []string{"EC_ENDPOINT", "EC_HOST"}, - api.ESSEndpoint, - ), }, "apikey": { Description: apikeyDesc, Type: schema.TypeString, Optional: true, Sensitive: true, - DefaultFunc: schema.MultiEnvDefaultFunc( - []string{"EC_API_KEY"}, "", - ), }, "username": { Description: usernameDesc, Type: schema.TypeString, Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc( - []string{"EC_USER", "EC_USERNAME"}, "", - ), }, "password": { Description: passwordDesc, Type: schema.TypeString, Optional: true, Sensitive: true, - DefaultFunc: schema.MultiEnvDefaultFunc( - []string{"EC_PASS", "EC_PASSWORD"}, "", - ), }, "insecure": { Description: insecureDesc, Type: schema.TypeBool, Optional: true, - Default: false, - DefaultFunc: schema.MultiEnvDefaultFunc( - []string{"EC_INSECURE", "EC_SKIP_TLS_VALIDATION"}, - false, - ), }, "timeout": { Description: timeoutDesc, Type: schema.TypeString, Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc( - []string{"EC_TIMEOUT"}, defaultTimeout.String(), - ), }, "verbose": { Description: verboseDesc, Type: schema.TypeBool, Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc( - []string{"EC_VERBOSE"}, false, - ), }, "verbose_credentials": { Description: verboseCredsDesc, Type: schema.TypeBool, Optional: true, - DefaultFunc: schema.EnvDefaultFunc( - "EC_VERBOSE_CREDENTIALS", false, - ), }, "verbose_file": { Description: timeoutDesc, Type: schema.TypeString, Optional: true, - DefaultFunc: schema.EnvDefaultFunc( - "EC_VERBOSE_FILE", "request.log", - ), }, } } + +func New() provider.Provider { + return &Provider{} +} + +var _ internal.Provider = (*Provider)(nil) + +func (p *Provider) GetClient() *api.API { + return p.Client +} + +type Provider struct { + Client *api.API +} + +func (p *Provider) 
GetSchema(context.Context) (tfsdk.Schema, diag.Diagnostics) { + var diags diag.Diagnostics + + return tfsdk.Schema{ + Attributes: map[string]tfsdk.Attribute{ + "endpoint": { + Description: fmt.Sprintf(endpointDesc, api.ESSEndpoint), + Type: types.StringType, + Optional: true, + Validators: []tfsdk.AttributeValidator{validators.Known(), validators.IsURLWithSchemeValidator(validURLSchemes)}, + }, + "apikey": { + Description: apikeyDesc, + Type: types.StringType, + Optional: true, + Sensitive: true, + PlanModifiers: []tfsdk.AttributePlanModifier{planmodifier.DefaultFromEnv([]string{"EC_API_KEY"})}, + }, + "username": { + Description: usernameDesc, + Type: types.StringType, + Optional: true, + }, + "password": { + Description: passwordDesc, + Type: types.StringType, + Optional: true, + Sensitive: true, + }, + "insecure": { + Description: insecureDesc, + Type: types.BoolType, + Optional: true, + }, + "timeout": { + Description: timeoutDesc, + Type: types.StringType, + Optional: true, + }, + "verbose": { + Description: verboseDesc, + Type: types.BoolType, + Optional: true, + }, + "verbose_credentials": { + Description: verboseCredsDesc, + Type: types.BoolType, + Optional: true, + }, + "verbose_file": { + Description: timeoutDesc, + Type: types.StringType, + Optional: true, + }, + }, + }, diags +} + +type providerData struct { + Endpoint types.String `tfsdk:"endpoint"` + ApiKey types.String `tfsdk:"apikey"` + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` + Insecure types.Bool `tfsdk:"insecure"` + Timeout types.String `tfsdk:"timeout"` + Verbose types.Bool `tfsdk:"verbose"` + VerboseCredentials types.Bool `tfsdk:"verbose_credentials"` + VerboseFile types.String `tfsdk:"verbose_file"` +} + +func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, res *provider.ConfigureResponse) { + // Retrieve provider data from configuration + var config providerData + diags := req.Config.Get(ctx, &config) + 
res.Diagnostics.Append(diags...) + if res.Diagnostics.HasError() { + return + } + + var endpoint string + if config.Endpoint.Null { + endpoint = util.MultiGetenv([]string{"EC_ENDPOINT", "EC_HOST"}, api.ESSEndpoint) + // TODO We need to validate the endpoint here, similar to how it is done if the value is passed via terraform (isURLWithSchemeValidator) + } else { + endpoint = config.Endpoint.Value + } + + var apiKey string + if config.ApiKey.Null { + apiKey = util.MultiGetenv([]string{"EC_API_KEY"}, "") + } else { + apiKey = config.ApiKey.Value + } + + var username string + if config.Username.Null { + username = util.MultiGetenv([]string{"EC_USER", "EC_USERNAME"}, "") + } else { + username = config.Username.Value + } + + var password string + if config.Password.Null { + password = util.MultiGetenv([]string{"EC_PASS", "EC_PASSWORD"}, "") + } else { + password = config.Password.Value + } + + var err error + var insecure bool + if config.Insecure.Null { + insecureStr := util.MultiGetenv([]string{"EC_INSECURE", "EC_SKIP_TLS_VALIDATION"}, "") + if insecure, err = util.StringToBool(insecureStr); err != nil { + res.Diagnostics.AddWarning( + "Unable to create client", + fmt.Sprintf("Invalid value %v for insecure", insecureStr), + ) + return + } + } else { + insecure = config.Insecure.Value + } + + var timeout string + if config.Timeout.Null { + timeout = util.MultiGetenv([]string{"EC_TIMEOUT"}, defaultTimeout.String()) + } else { + timeout = config.Timeout.Value + } + + var verbose bool + if config.Verbose.Null { + verboseStr := util.MultiGetenv([]string{"EC_VERBOSE"}, "") + if verbose, err = util.StringToBool(verboseStr); err != nil { + res.Diagnostics.AddWarning( + "Unable to create client", + fmt.Sprintf("Invalid value %v for verbose", verboseStr), + ) + return + } + } else { + verbose = config.Verbose.Value + } + + var verboseCredentials bool + if config.VerboseCredentials.Null { + verboseCredentialsStr := util.MultiGetenv([]string{"EC_VERBOSE_CREDENTIALS"}, "") + if 
verboseCredentials, err = util.StringToBool(verboseCredentialsStr); err != nil { + res.Diagnostics.AddWarning( + "Unable to create client", + fmt.Sprintf("Invalid value %v for verboseCredentials", verboseCredentialsStr), + ) + return + } + } else { + verboseCredentials = config.VerboseCredentials.Value + } + + var verboseFile string + if config.VerboseFile.Null { + verboseFile = util.MultiGetenv([]string{"EC_VERBOSE_FILE"}, "request.log") + } else { + verboseFile = config.VerboseFile.Value + } + + cfg, err := newAPIConfig( + endpoint, + apiKey, + username, + password, + insecure, + timeout, + verbose, + verboseCredentials, + verboseFile, + ) + if err != nil { + res.Diagnostics.AddWarning( + "Unable to create api Client config", + fmt.Sprintf("Unexpected error: %+v", err), + ) + return + } + + p.Client, err = api.NewAPI(cfg) + if err != nil { + res.Diagnostics.AddWarning( + "Unable to create api Client config", + fmt.Sprintf("Unexpected error: %+v", err), + ) + return + } +} + +func (p *Provider) GetResources(_ context.Context) (map[string]provider.ResourceType, diag.Diagnostics) { + return map[string]provider.ResourceType{}, nil +} + +func (p *Provider) GetDataSources(_ context.Context) (map[string]provider.DataSourceType, diag.Diagnostics) { + return map[string]provider.DataSourceType{ + "ec_stack": stackdatasource.DataSourceType{}, + "ec_deployment": deploymentdatasource.DataSourceType{}, + "ec_deployments": deploymentsdatasource.DataSourceType{}, + }, nil +} diff --git a/ec/provider_config.go b/ec/provider_config.go index f1a3ca8be..acef22ef5 100644 --- a/ec/provider_config.go +++ b/ec/provider_config.go @@ -20,14 +20,15 @@ package ec import ( "context" "fmt" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "net/http" "os" "time" "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/auth" - 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) const ( @@ -35,17 +36,15 @@ const ( ) var ( - // DefaultHTTPRetries to use for the provider's HTTP client. + // DefaultHTTPRetries to use for the provider's HTTP Client. DefaultHTTPRetries = 2 ) -// configureAPI implements schema.ConfigureContextFunc func configureAPI(_ context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { - cfg, err := newAPIConfig(d) + cfg, err := newAPIConfigLegacy(d) if err != nil { return nil, diag.FromErr(err) } - client, err := api.NewAPI(cfg) if err != nil { return nil, diag.FromErr(err) @@ -54,27 +53,52 @@ func configureAPI(_ context.Context, d *schema.ResourceData) (interface{}, diag. return client, nil } -func newAPIConfig(d *schema.ResourceData) (api.Config, error) { +func newAPIConfigLegacy(d *schema.ResourceData) (api.Config, error) { + endpoint := util.GetStringFromSchemaOrEnv(d, "endpoint", []string{"EC_ENDPOINT", "EC_HOST"}, api.ESSEndpoint) + apiKey := util.GetStringFromSchemaOrEnv(d, "apikey", []string{"EC_API_KEY"}, "") + username := util.GetStringFromSchemaOrEnv(d, "username", []string{"EC_USER", "EC_USERNAME"}, "") + password := util.GetStringFromSchemaOrEnv(d, "password", []string{"EC_PASS", "EC_PASSWORD"}, "") + timeout := util.GetStringFromSchemaOrEnv(d, "timeout", []string{"EC_TIMEOUT"}, defaultTimeout.String()) + insecure := util.GetBoolFromSchemaOrEnv(d, "insecure", []string{"EC_INSECURE", "EC_SKIP_TLS_VALIDATION"}) + verbose := util.GetBoolFromSchemaOrEnv(d, "verbose", []string{"EC_VERBOSE"}) + verboseCredentials := util.GetBoolFromSchemaOrEnv(d, "verbose_credentials", []string{"EC_VERBOSE_CREDENTIALS"}) + verboseFile := util.GetStringFromSchemaOrEnv(d, "verbose_file", []string{"EC_VERBOSE_FILE"}, "request.log") + cfg, err := newAPIConfig(endpoint, apiKey, username, password, insecure, timeout, verbose, verboseCredentials, verboseFile) + if err != nil { + return api.Config{}, err + } 
+ return cfg, nil +} + +func newAPIConfig(endpoint string, + apiKey string, + username string, + password string, + insecure bool, + timeout string, + verbose bool, + verboseCredentials bool, + verboseFile string) (api.Config, error) { var cfg api.Config - timeout, err := time.ParseDuration(d.Get("timeout").(string)) + timeoutDuration, err := time.ParseDuration(timeout) if err != nil { return cfg, err } authWriter, err := auth.NewAuthWriter(auth.Config{ - APIKey: d.Get("apikey").(string), - Username: d.Get("username").(string), - Password: d.Get("password").(string), + APIKey: apiKey, + Username: username, + Password: password, }) if err != nil { return cfg, err } verboseCfg, err := verboseSettings( - d.Get("verbose_file").(string), - d.Get("verbose").(bool), - !d.Get("verbose_credentials").(bool), + verboseFile, + verbose, + !verboseCredentials, ) if err != nil { return cfg, err @@ -85,9 +109,9 @@ func newAPIConfig(d *schema.ResourceData) (api.Config, error) { Client: &http.Client{}, VerboseSettings: verboseCfg, AuthWriter: authWriter, - Host: d.Get("endpoint").(string), - SkipTLSVerify: d.Get("insecure").(bool), - Timeout: timeout, + Host: endpoint, + SkipTLSVerify: insecure, + Timeout: timeoutDuration, UserAgent: userAgent(Version), Retries: DefaultHTTPRetries, }, nil diff --git a/ec/provider_config_test.go b/ec/provider_config_test.go index cda457e90..2419f828b 100644 --- a/ec/provider_config_test.go +++ b/ec/provider_config_test.go @@ -343,7 +343,7 @@ func Test_newAPIConfig(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := newAPIConfig(tt.args.d) + got, err := newAPIConfigLegacy(tt.args.d) assert.Equal(t, tt.err, err) if got.Verbose && err == nil { diff --git a/ec/version.go b/ec/version.go index 8582b5a38..e00612ef5 100644 --- a/ec/version.go +++ b/ec/version.go @@ -18,4 +18,4 @@ package ec // Version contains the current terraform provider version. 
-const Version = "0.4.0-dev" +const Version = "0.5.0-dev" diff --git a/go.mod b/go.mod index 9a9c09d4a..f2c63afd2 100644 --- a/go.mod +++ b/go.mod @@ -7,38 +7,38 @@ require ( github.com/elastic/cloud-sdk-go v1.10.0 github.com/go-openapi/runtime v0.24.1 github.com/go-openapi/strfmt v0.21.3 + github.com/hashicorp/terraform-plugin-framework v0.11.1 github.com/hashicorp/terraform-plugin-go v0.14.0 github.com/hashicorp/terraform-plugin-mux v0.7.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.21.0 github.com/stretchr/testify v1.8.0 + golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 ) require ( - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/agext/levenshtein v1.2.2 // indirect + github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-cidr v1.1.0 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fatih/color v1.13.0 // indirect - github.com/go-openapi/analysis v0.21.2 // indirect - github.com/go-openapi/errors v0.20.2 // indirect + github.com/go-openapi/analysis v0.21.4 // indirect + github.com/go-openapi/errors v0.20.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.6 // indirect - github.com/go-openapi/loads v0.21.1 // indirect - github.com/go-openapi/spec v0.20.4 // indirect - github.com/go-openapi/swag v0.21.1 // indirect - github.com/go-openapi/validate v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/loads v0.21.2 // indirect + github.com/go-openapi/spec v0.20.7 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/validate v0.22.0 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.8 // indirect 
github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect - github.com/hashicorp/go-hclog v1.2.1 // indirect + github.com/hashicorp/go-hclog v1.3.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.4.4 // indirect + github.com/hashicorp/go-plugin v1.4.5 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hc-install v0.4.0 // indirect @@ -49,32 +49,32 @@ require ( github.com/hashicorp/terraform-plugin-log v0.7.0 // indirect github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c // indirect github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect - github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect + github.com/hashicorp/yamux v0.1.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/mitchellh/go-wordwrap v1.0.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/oklog/run v1.0.0 // indirect + github.com/oklog/run v1.1.0 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/vmihailenco/msgpack/v4 v4.3.12 
// indirect - github.com/vmihailenco/tagparser v0.1.1 // indirect - github.com/zclconf/go-cty v1.10.0 // indirect - go.mongodb.org/mongo-driver v1.10.0 // indirect + github.com/vmihailenco/tagparser v0.1.2 // indirect + github.com/zclconf/go-cty v1.11.0 // indirect + go.mongodb.org/mongo-driver v1.10.1 // indirect golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 // indirect + golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b // indirect + golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/appengine v1.6.6 // indirect - google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d // indirect - google.golang.org/grpc v1.48.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf // indirect + google.golang.org/grpc v1.49.0 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 097a6d198..d1e0d7ea1 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,3 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -7,18 +6,15 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/PuerkitoBio/purell v1.1.0/go.mod 
h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= -github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= @@ -37,15 +33,6 @@ github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:W github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= 
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -57,12 +44,6 @@ github.com/elastic/cloud-sdk-go v1.10.0 h1:1WBUkP71ogoxynWfaGg5Bm8Z36F4tL3bjiu+e github.com/elastic/cloud-sdk-go v1.10.0/go.mod h1:BMx5iwmVwL8gpomLSMPI6gcvfWzrV4KsWSnbPlWwlrI= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod 
h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -89,8 +70,9 @@ github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7 github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= github.com/go-openapi/analysis v0.20.1/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= github.com/go-openapi/analysis v0.21.1/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= -github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= +github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= @@ -100,8 +82,9 @@ github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpX github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod 
h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.1/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= +github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= @@ -113,8 +96,9 @@ github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3Hfo github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -126,8 +110,9 @@ github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hs github.com/go-openapi/loads v0.20.0/go.mod 
h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= github.com/go-openapi/loads v0.21.0/go.mod h1:rHYve9nZrQ4CJhyeIIFJINGCg1tQpx2yJrrNo8sf1ws= -github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0= github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= +github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= @@ -147,8 +132,10 @@ github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFu github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= -github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.7 h1:1Rlu/ZrOCCob0n+JKKJAWhNWMPW8bOZRg8FJaY+0SKI= +github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= @@ -174,8 +161,9 @@ 
github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5H github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= @@ -184,8 +172,9 @@ github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0 github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= github.com/go-openapi/validate v0.20.3/go.mod h1:goDdqVGiigM3jChcrYJxD2joalke3ZXeftD16byIjA4= -github.com/go-openapi/validate v0.21.0 h1:+Wqk39yKOhfpLqNLEC0/eViCkzM5FVXVqrvt526+wcI= github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-openapi/validate v0.22.0 h1:b0QecH6VslW/TxtpKgzpO1SNG7GU2FsaqKdP1E2T50Y= +github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.1/go.mod 
h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= @@ -214,22 +203,10 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -237,18 +214,13 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -260,12 +232,12 @@ github.com/hashicorp/go-cleanhttp v0.5.2 
h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= -github.com/hashicorp/go-hclog v1.2.1 h1:YQsLlGDJgwhXFpucSPyVbCBviQtjlHv3jLTlp8YmtEw= -github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.3.0 h1:G0ACM8Z2WilWgPv3Vdzwm3V0BQu/kSmrkVtpe1fy9do= +github.com/hashicorp/go-hclog v1.3.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.4 h1:NVdrSdFRt3SkZtNckJ6tog7gbpRrcbOjQi/rgF7JYWQ= -github.com/hashicorp/go-plugin v1.4.4/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= +github.com/hashicorp/go-plugin v1.4.5 h1:oTE/oQR4eghggRg8VY7PAz3dr++VwDNBGCcOfIvHpBo= +github.com/hashicorp/go-plugin v1.4.5/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -283,6 +255,8 @@ github.com/hashicorp/terraform-exec v0.17.2 h1:EU7i3Fh7vDUI9nNRdMATCEfnm9axzTnad github.com/hashicorp/terraform-exec v0.17.2/go.mod h1:tuIbsL2l4MlwwIZx9HPM+LOV9vVyEfBYu2GsO1uH3/8= github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= +github.com/hashicorp/terraform-plugin-framework v0.11.1 
h1:rq8f+TLDO4tJu+n9mMYlDrcRoIdrg0gTUvV2Jr0Ya24= +github.com/hashicorp/terraform-plugin-framework v0.11.1/go.mod h1:GENReHOz6GEt8Jk3UN94vk8BdC6irEHFgN3Z9HPhPUU= github.com/hashicorp/terraform-plugin-go v0.14.0 h1:ttnSlS8bz3ZPYbMb84DpcPhY4F5DsQtcAS7cHo8uvP4= github.com/hashicorp/terraform-plugin-go v0.14.0/go.mod h1:2nNCBeRLaenyQEi78xrGrs9hMbulveqG/zDMQSvVJTE= github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs= @@ -295,8 +269,8 @@ github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b57 github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c/go.mod h1:Wn3Na71knbXc1G8Lh+yu/dQWWJeFQEpDeJMtWMtlmNI= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -340,19 +314,21 @@ github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsI github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= 
-github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.3/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -366,8 +342,8 @@ github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= @@ -380,8 +356,6 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -404,7 +378,6 @@ github.com/stretchr/objx v0.4.0/go.mod 
h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -419,8 +392,9 @@ github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaU github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= +github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= @@ -433,8 +407,9 @@ github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHM github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= 
github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.10.0 h1:mp9ZXQeIcN8kAwuqorjH+Q+njbJKjLrvB2yIh4q7U+0= github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty v1.11.0 h1:726SxLdi2SDnjY+BStqB9J1hNp4+2WlzyXLuimibIe0= +github.com/zclconf/go-cty v1.11.0/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -448,9 +423,9 @@ go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R7 go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.8.2/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= -go.mongodb.org/mongo-driver v1.10.0 h1:UtV6N5k14upNp4LTduX0QCufG124fSu25Wz9tu94GLg= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.mongodb.org/mongo-driver v1.10.1 h1:NujsPveKwHaWuKUer/ceo9DzEe7HIj1SlJ6uvXZG0S4= +go.mongodb.org/mongo-driver v1.10.1/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -467,16 +442,12 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm 
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 h1:tnebWN09GYg9OLPss1KXj8txwZc6X6uMr6VFdcGNbHw= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -488,8 +459,6 @@ golang.org/x/net 
v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -499,20 +468,17 @@ golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5o golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b h1:ZmngSVLe/wycRns9MKikG9OWIEjGcGAkacif7oYQaUY= +golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -531,7 +497,6 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -541,8 +506,10 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 h1:nonptSpoQ4vQjyraW20DXPAglgQfVnM9ZC6MmNLMR60= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 h1:v6hYoSR9T5oet+pMXwUWkbiVqx/63mlHjefrHmxwfeY= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= @@ -557,51 +524,25 @@ golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d h1:92D1fum1bJLKSdr11OJ+54YeCMCGYIygTA7R/YZxH5M= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf 
v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf h1:Q5xNKbTSFwkuaaGaR7CMcXEM5sy19KYdUU8iF8/iRC0= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -615,7 +556,6 @@ gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 
v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -627,5 +567,3 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/main.go b/main.go index 29889ca0c..1e989d2fe 100644 --- a/main.go +++ b/main.go @@ -20,6 +20,7 @@ package main import ( "context" "flag" + "github.com/hashicorp/terraform-plugin-framework/providerserver" "log" "github.com/elastic/terraform-provider-ec/ec" @@ -40,7 +41,7 @@ func main() { upgradedSdkProvider, err := tf5to6server.UpgradeServer( context.Background(), - ec.Provider().GRPCProvider, + ec.LegacyProvider().GRPCProvider, ) if err != nil { @@ -50,8 +51,9 @@ func main() { ctx := context.Background() providers := []func() tfprotov6.ProviderServer{ func() tfprotov6.ProviderServer { return upgradedSdkProvider }, - // TODO - // add new v6 provider with `ec_deployment` resource + func() tfprotov6.ProviderServer { + return providerserver.NewProtocol6(ec.New())() + }, } muxServer, err := tf6muxserver.NewMuxServer(ctx, providers...) 
From a5de6e998e9c344e1f4f33fb0fd620f5a840e973 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 7 Sep 2022 12:16:05 +0200 Subject: [PATCH 005/104] Migrate resource ec_deployment_traffic_filter_association to terraform-provider-framework --- .github/workflows/go.yml | 1 + ec/acc/acc_prereq.go | 11 +- ...loyment_traffic_filter_association_test.go | 58 ++++ ...filter_association_basic_ignore_changes.tf | 39 +++ .../deploymentdatasource/datasource.go | 16 +- .../deploymentsdatasource/datasource.go | 11 +- ec/ecdatasource/stackdatasource/datasource.go | 11 +- .../trafficfilterassocresource/create.go | 42 ++- .../trafficfilterassocresource/delete.go | 34 +- .../trafficfilterassocresource/delete_test.go | 113 ------ .../trafficfilterassocresource/expanders.go | 33 -- .../expanders_test.go | 61 ---- .../flatteners_test.go | 120 ------- .../import_state.go | 44 +++ .../trafficfilterassocresource/read.go | 47 ++- .../trafficfilterassocresource/read_test.go | 114 ------ .../trafficfilterassocresource/resource.go | 40 --- .../resource_test.go | 324 ++++++++++++++++++ .../trafficfilterassocresource/schema.go | 77 ++++- .../{testutils.go => update.go} | 12 +- .../flatteners/flatten_endpoint.go} | 35 +- .../flatteners/flatten_tags.go} | 12 +- .../flatteners/flatten_tags_test.go} | 9 +- ec/internal/planmodifier/default_from_env.go | 17 + ec/internal/planmodifier/default_value.go | 17 + ec/internal/provider.go | 32 ++ ec/internal/validators/knownvalidator.go | 17 + ec/internal/validators/urlvalidator.go | 24 +- ec/provider.go | 35 +- ec/provider_config.go | 9 +- examples/deployment_with_init/main.tf | 18 +- go.mod | 3 +- go.sum | 2 + main.go | 7 +- 34 files changed, 796 insertions(+), 649 deletions(-) create mode 100644 ec/acc/testdata/deployment_traffic_filter_association_basic_ignore_changes.tf delete mode 100644 ec/ecresource/trafficfilterassocresource/delete_test.go delete mode 100644 ec/ecresource/trafficfilterassocresource/expanders.go delete mode 100644 
ec/ecresource/trafficfilterassocresource/expanders_test.go delete mode 100644 ec/ecresource/trafficfilterassocresource/flatteners_test.go create mode 100644 ec/ecresource/trafficfilterassocresource/import_state.go delete mode 100644 ec/ecresource/trafficfilterassocresource/read_test.go delete mode 100644 ec/ecresource/trafficfilterassocresource/resource.go create mode 100644 ec/ecresource/trafficfilterassocresource/resource_test.go rename ec/ecresource/trafficfilterassocresource/{testutils.go => update.go} (73%) rename ec/{ecresource/trafficfilterassocresource/flatteners.go => internal/flatteners/flatten_endpoint.go} (57%) rename ec/{ecdatasource/deploymentdatasource/flatteners_tags.go => internal/flatteners/flatten_tags.go} (82%) rename ec/{ecdatasource/deploymentdatasource/flatteners_tags_test.go => internal/flatteners/flatten_tags_test.go} (92%) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 76d776287..47eb58721 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -22,6 +22,7 @@ jobs: uses: hashicorp/setup-terraform@v2 with: terraform_version: "1.x.x" + terraform_wrapper: false - name: Cache Go Modules uses: actions/cache@v3 diff --git a/ec/acc/acc_prereq.go b/ec/acc/acc_prereq.go index 9e77eea24..e079cd6e0 100644 --- a/ec/acc/acc_prereq.go +++ b/ec/acc/acc_prereq.go @@ -19,13 +19,14 @@ package acc import ( "context" - "github.com/hashicorp/terraform-plugin-framework/providerserver" - "github.com/hashicorp/terraform-plugin-go/tfprotov5" - "github.com/hashicorp/terraform-plugin-mux/tf5muxserver" "net/http" "os" "testing" + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-mux/tf5muxserver" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/auth" "github.com/elastic/terraform-provider-ec/ec" @@ -44,9 +45,7 @@ func protoV5ProviderFactories() map[string]func() 
(tfprotov5.ProviderServer, err func() tfprotov5.ProviderServer { return ec.LegacyProvider().GRPCProvider() }, - func() tfprotov5.ProviderServer { - return providerserver.NewProtocol5(ec.New())() - }, + providerserver.NewProtocol5(ec.New("acc-tests")), ) }, } diff --git a/ec/acc/deployment_traffic_filter_association_test.go b/ec/acc/deployment_traffic_filter_association_test.go index 0fee2adc6..ab657ad1e 100644 --- a/ec/acc/deployment_traffic_filter_association_test.go +++ b/ec/acc/deployment_traffic_filter_association_test.go @@ -72,6 +72,64 @@ func TestAccDeploymentTrafficFilterAssociation_basic(t *testing.T) { }) } +func TestAccDeploymentTrafficFilterAssociation_UpgradeFrom0_4_1(t *testing.T) { + resName := "ec_deployment_traffic_filter.tf_assoc" + resAssocName := "ec_deployment_traffic_filter_association.tf_assoc" + randomName := acctest.RandomWithPrefix(prefix) + startCfg := "testdata/deployment_traffic_filter_association_basic.tf" + ignoreChangesCfgFile := "testdata/deployment_traffic_filter_association_basic_ignore_changes.tf" + cfg := fixtureAccDeploymentTrafficFilterResourceAssociationBasic(t, startCfg, randomName, getRegion(), defaultTemplate) + ignoreChangesCfg := fixtureAccDeploymentTrafficFilterResourceAssociationBasic(t, ignoreChangesCfgFile, randomName, getRegion(), defaultTemplate) + + // Required because of a bug - see https://discuss.hashicorp.com/t/acceptance-testing-sdk-framework-upgrade-issue/44166/2 + externalProviderConfig := ` +terraform { + required_providers { + ec = { + source = "elastic/ec" + version = "0.4.1" + } + } +}` + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + CheckDestroy: testAccDeploymentTrafficFilterDestroy, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "ec": { + VersionConstraint: "0.4.1", + Source: "elastic/ec", + }, + }, + // Expects a non-empty plan since "ec_deployment.traffic_filter" + // will have changes due to the traffic 
filter association. + ExpectNonEmptyPlan: true, + Config: cfg + externalProviderConfig, + Check: checkBasicDeploymentTrafficFilterAssociationResource( + resName, resAssocName, randomName, + resource.TestCheckResourceAttr(resName, "include_by_default", "false"), + resource.TestCheckResourceAttr(resName, "type", "ip"), + resource.TestCheckResourceAttr(resName, "rule.#", "1"), + resource.TestCheckResourceAttr(resName, "rule.0.source", "0.0.0.0/0"), + ), + }, + { + PlanOnly: true, + ProtoV5ProviderFactories: testAccProviderFactory, + Config: ignoreChangesCfg, + Check: checkBasicDeploymentTrafficFilterAssociationResource( + resName, resAssocName, randomName, + resource.TestCheckResourceAttr(resName, "include_by_default", "false"), + resource.TestCheckResourceAttr(resName, "type", "ip"), + resource.TestCheckResourceAttr(resName, "rule.#", "1"), + resource.TestCheckResourceAttr(resName, "rule.0.source", "0.0.0.0/0"), + ), + }, + }, + }) +} + func fixtureAccDeploymentTrafficFilterResourceAssociationBasic(t *testing.T, fileName, name, region, depTpl string) string { t.Helper() diff --git a/ec/acc/testdata/deployment_traffic_filter_association_basic_ignore_changes.tf b/ec/acc/testdata/deployment_traffic_filter_association_basic_ignore_changes.tf new file mode 100644 index 000000000..ce1bce981 --- /dev/null +++ b/ec/acc/testdata/deployment_traffic_filter_association_basic_ignore_changes.tf @@ -0,0 +1,39 @@ +data "ec_stack" "latest" { + version_regex = "latest" + region = "%s" +} + +resource "ec_deployment" "tf_assoc" { + name = "%s" + region = "%s" + version = data.ec_stack.latest.version + deployment_template_id = "%s" + + elasticsearch { + topology { + id = "hot_content" + size = "1g" + } + } + + kibana {} + + lifecycle { + ignore_changes = [traffic_filter] + } +} + +resource "ec_deployment_traffic_filter" "tf_assoc" { + name = "%s" + region = "%s" + type = "ip" + + rule { + source = "0.0.0.0/0" + } +} + +resource "ec_deployment_traffic_filter_association" "tf_assoc" { + 
traffic_filter_id = ec_deployment_traffic_filter.tf_assoc.id + deployment_id = ec_deployment.tf_assoc.id +} diff --git a/ec/ecdatasource/deploymentdatasource/datasource.go b/ec/ecdatasource/deploymentdatasource/datasource.go index e940639b9..1239f6041 100644 --- a/ec/ecdatasource/deploymentdatasource/datasource.go +++ b/ec/ecdatasource/deploymentdatasource/datasource.go @@ -20,7 +20,7 @@ package deploymentdatasource import ( "context" "fmt" - "github.com/elastic/terraform-provider-ec/ec/internal" + "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/provider" @@ -30,6 +30,8 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deputil" "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal" + "github.com/elastic/terraform-provider-ec/ec/internal/flatteners" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) @@ -37,10 +39,12 @@ var _ provider.DataSourceType = (*DataSourceType)(nil) type DataSourceType struct{} -func (s DataSourceType) NewDataSource(ctx context.Context, p provider.Provider) (datasource.DataSource, diag.Diagnostics) { +func (s DataSourceType) NewDataSource(ctx context.Context, in provider.Provider) (datasource.DataSource, diag.Diagnostics) { + p, diags := internal.ConvertProviderType(in) + return &deploymentDataSource{ - p: p.(internal.Provider), - }, nil + p: p, + }, diags } var _ datasource.DataSource = (*deploymentDataSource)(nil) @@ -115,7 +119,9 @@ func modelToState(ctx context.Context, res *models.DeploymentGetResponse, state diags.Append(flattenIntegrationsServerResources(ctx, res.Resources.IntegrationsServer, &state.IntegrationsServer)...) diags.Append(flattenEnterpriseSearchResources(ctx, res.Resources.EnterpriseSearch, &state.EnterpriseSearch)...) 
- state.Tags = flattenTags(res.Metadata) + if res.Metadata != nil { + state.Tags = flatteners.FlattenTags(res.Metadata.Tags) + } return diags } diff --git a/ec/ecdatasource/deploymentsdatasource/datasource.go b/ec/ecdatasource/deploymentsdatasource/datasource.go index 0fbd64040..7f4b6b64c 100644 --- a/ec/ecdatasource/deploymentsdatasource/datasource.go +++ b/ec/ecdatasource/deploymentsdatasource/datasource.go @@ -20,13 +20,14 @@ package deploymentsdatasource import ( "context" "fmt" + "strconv" + "github.com/elastic/terraform-provider-ec/ec/internal" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" - "strconv" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/models" @@ -37,10 +38,12 @@ var _ provider.DataSourceType = (*DataSourceType)(nil) type DataSourceType struct{} -func (s DataSourceType) NewDataSource(ctx context.Context, p provider.Provider) (datasource.DataSource, diag.Diagnostics) { +func (s DataSourceType) NewDataSource(ctx context.Context, in provider.Provider) (datasource.DataSource, diag.Diagnostics) { + p, diags := internal.ConvertProviderType(in) + return &deploymentsDataSource{ - p: p.(internal.Provider), - }, nil + p: p, + }, diags } var _ datasource.DataSource = (*deploymentsDataSource)(nil) diff --git a/ec/ecdatasource/stackdatasource/datasource.go b/ec/ecdatasource/stackdatasource/datasource.go index f0fa5002e..200c72f71 100644 --- a/ec/ecdatasource/stackdatasource/datasource.go +++ b/ec/ecdatasource/stackdatasource/datasource.go @@ -20,6 +20,8 @@ package stackdatasource import ( "context" "fmt" + "regexp" + "github.com/elastic/cloud-sdk-go/pkg/api/stackapi" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/terraform-provider-ec/ec/internal" @@ -28,17 
+30,18 @@ import ( "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" - "regexp" ) var _ provider.DataSourceType = (*DataSourceType)(nil) type DataSourceType struct{} -func (s DataSourceType) NewDataSource(ctx context.Context, p provider.Provider) (datasource.DataSource, diag.Diagnostics) { +func (s DataSourceType) NewDataSource(ctx context.Context, in provider.Provider) (datasource.DataSource, diag.Diagnostics) { + p, diags := internal.ConvertProviderType(in) + return &stackDataSource{ - p: p.(internal.Provider), - }, nil + p: p, + }, diags } var _ datasource.DataSource = (*stackDataSource)(nil) diff --git a/ec/ecresource/trafficfilterassocresource/create.go b/ec/ecresource/trafficfilterassocresource/create.go index c1c75e46e..7c2946995 100644 --- a/ec/ecresource/trafficfilterassocresource/create.go +++ b/ec/ecresource/trafficfilterassocresource/create.go @@ -19,29 +19,35 @@ package trafficfilterassocresource import ( "context" - "strconv" - "strings" - - "github.com/elastic/cloud-sdk-go/pkg/api" + "fmt" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" ) -// create will create a new deployment traffic filter ruleset association. 
-func create(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) - params := expand(d) - params.API = client +func (t trafficFilterAssocResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var newState modelV0 - if err := trafficfilterapi.CreateAssociation(params); err != nil { - return diag.FromErr(err) + diags := request.Plan.Get(ctx, &newState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return } - d.SetId(hashID(params.EntityID, params.ID)) - return read(ctx, d, meta) -} + if err := trafficfilterapi.CreateAssociation(trafficfilterapi.CreateAssociationParams{ + API: t.provider.GetClient(), + ID: newState.TrafficFilterID.Value, + EntityID: newState.DeploymentID.Value, + EntityType: entityTypeDeployment, + }); err != nil { + response.Diagnostics.AddError(err.Error(), err.Error()) + return + } -func hashID(elem ...string) string { - return strconv.Itoa(schema.HashString(strings.Join(elem, "-"))) + newState.ID = types.String{Value: fmt.Sprintf("%v-%v", newState.DeploymentID.Value, newState.TrafficFilterID.Value)} + diags = response.State.Set(ctx, newState) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { + return + } } diff --git a/ec/ecresource/trafficfilterassocresource/delete.go b/ec/ecresource/trafficfilterassocresource/delete.go index 2e77312c8..8d4103b55 100644 --- a/ec/ecresource/trafficfilterassocresource/delete.go +++ b/ec/ecresource/trafficfilterassocresource/delete.go @@ -20,31 +20,31 @@ package trafficfilterassocresource import ( "context" "errors" - - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/elastic/cloud-sdk-go/pkg/client/deployments_traffic_filter" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" ) -// delete will delete an existing deployment traffic filter ruleset association. -func delete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var client = meta.(*api.API) +func (t trafficFilterAssocResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var state modelV0 - params := expand(d) - params.API = client + diags := request.State.Get(ctx, &state) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { + return + } - if err := trafficfilterapi.DeleteAssociation(trafficfilterapi.DeleteAssociationParams(params)); err != nil { - if associationDeleted(err) { - d.SetId("") - return nil + if err := trafficfilterapi.DeleteAssociation(trafficfilterapi.DeleteAssociationParams{ + API: t.provider.GetClient(), + ID: state.TrafficFilterID.Value, + EntityID: state.DeploymentID.Value, + EntityType: entityTypeDeployment, + }); err != nil { + if !associationDeleted(err) { + response.Diagnostics.AddError(err.Error(), err.Error()) + return } - return diag.FromErr(err) } - - d.SetId("") - return nil } func associationDeleted(err error) bool { diff --git a/ec/ecresource/trafficfilterassocresource/delete_test.go b/ec/ecresource/trafficfilterassocresource/delete_test.go deleted file mode 100644 index bc738a70b..000000000 --- a/ec/ecresource/trafficfilterassocresource/delete_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package trafficfilterassocresource - -import ( - "context" - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/terraform-provider-ec/ec/internal/util" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" -) - -func Test_delete(t *testing.T) { - tc500Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilterAssociation(), - Schema: newSchema(), - }) - wantTC500 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilterAssociation(), - Schema: newSchema(), - }) - - tc404Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilterAssociation(), - Schema: newSchema(), - }) - wantTC404 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilterAssociation(), - Schema: newSchema(), - }) - wantTC404.SetId("") - type args struct { - ctx context.Context - d *schema.ResourceData - meta interface{} - } - tests := []struct { - name string - args args - want diag.Diagnostics - wantRD *schema.ResourceData - }{ - { - name: "returns an error when it receives a 500", - args: args{ - d: tc500Err, - meta: api.NewMock(mock.NewErrorResponse(500, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: diag.Diagnostics{ - { - Severity: diag.Error, - Summary: "api error: 1 error occurred:\n\t* some: message\n\n", - }, - }, - wantRD: wantTC500, - }, - { - name: "returns nil and unsets the state when the error is known", - args: args{ - d: tc404Err, - meta: api.NewMock(mock.NewErrorResponse(404, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: nil, - wantRD: wantTC404, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := delete(tt.args.ctx, tt.args.d, 
tt.args.meta) - assert.Equal(t, tt.want, got) - var want interface{} - if tt.wantRD != nil { - if s := tt.wantRD.State(); s != nil { - want = s.Attributes - } - } - - var gotState interface{} - if s := tt.args.d.State(); s != nil { - gotState = s.Attributes - } - - assert.Equal(t, want, gotState) - }) - } -} diff --git a/ec/ecresource/trafficfilterassocresource/expanders.go b/ec/ecresource/trafficfilterassocresource/expanders.go deleted file mode 100644 index 185c53635..000000000 --- a/ec/ecresource/trafficfilterassocresource/expanders.go +++ /dev/null @@ -1,33 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package trafficfilterassocresource - -import ( - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -const entityType = "deployment" - -func expand(d *schema.ResourceData) trafficfilterapi.CreateAssociationParams { - return trafficfilterapi.CreateAssociationParams{ - ID: d.Get("traffic_filter_id").(string), - EntityID: d.Get("deployment_id").(string), - EntityType: entityType, - } -} diff --git a/ec/ecresource/trafficfilterassocresource/expanders_test.go b/ec/ecresource/trafficfilterassocresource/expanders_test.go deleted file mode 100644 index 798b9f6ba..000000000 --- a/ec/ecresource/trafficfilterassocresource/expanders_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package trafficfilterassocresource - -import ( - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_expand(t *testing.T) { - rd := util.NewResourceData(t, util.ResDataParams{ - State: newSampleTrafficFilterAssociation(), - ID: "123451", - Schema: newSchema(), - }) - type args struct { - d *schema.ResourceData - } - tests := []struct { - name string - args args - want trafficfilterapi.CreateAssociationParams - }{ - { - name: "expands the resource data", - args: args{d: rd}, - want: trafficfilterapi.CreateAssociationParams{ - ID: mockTrafficFilterID, - EntityID: mock.ValidClusterID, - EntityType: entityType, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := expand(tt.args.d) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/ecresource/trafficfilterassocresource/flatteners_test.go b/ec/ecresource/trafficfilterassocresource/flatteners_test.go deleted file mode 100644 index 62d66ac99..000000000 --- a/ec/ecresource/trafficfilterassocresource/flatteners_test.go +++ /dev/null @@ -1,120 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package trafficfilterassocresource - -import ( - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_flatten(t *testing.T) { - rd := util.NewResourceData(t, util.ResDataParams{ - State: newSampleTrafficFilterAssociation(), - ID: "123451", - Schema: newSchema(), - }) - - wantNotFoundRd := util.NewResourceData(t, util.ResDataParams{ - State: newSampleTrafficFilterAssociation(), - ID: "123451", - Schema: newSchema(), - }) - - _ = wantNotFoundRd.Set("deployment_id", "") - _ = wantNotFoundRd.Set("traffic_filter_id", "") - type args struct { - res *models.TrafficFilterRulesetInfo - d *schema.ResourceData - } - tests := []struct { - name string - args args - want *schema.ResourceData - err error - }{ - { - name: "empty response returns nil", - args: args{d: rd}, - }, - { - name: "flattens the response", - args: args{d: rd, - res: &models.TrafficFilterRulesetInfo{ - Associations: []*models.FilterAssociation{ - { - EntityType: ec.String("cluster"), - ID: ec.String("someid"), - }, - { - EntityType: ec.String(entityType), - ID: ec.String(mock.ValidClusterID), - }, - }, - }, - }, - want: rd, - }, - { - name: "flattens the response even when the association has been removed externally", - args: args{d: rd, - res: &models.TrafficFilterRulesetInfo{ - Associations: 
[]*models.FilterAssociation{{ - EntityType: ec.String("cluster"), - ID: ec.String("someid"), - }}, - }, - }, - want: wantNotFoundRd, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := flatten(tt.args.res, tt.args.d) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - - if tt.args.res == nil { - return - } - - wantState := tt.want.State() - if wantState == nil { - tt.want.SetId("some") - wantState = tt.want.State() - } - - gotState := tt.args.d.State() - if gotState == nil { - tt.args.d.SetId("some") - gotState = tt.want.State() - } - - assert.Equal(t, wantState.Attributes, gotState.Attributes) - }) - } -} diff --git a/ec/ecresource/trafficfilterassocresource/import_state.go b/ec/ecresource/trafficfilterassocresource/import_state.go new file mode 100644 index 000000000..22d8c3e30 --- /dev/null +++ b/ec/ecresource/trafficfilterassocresource/import_state.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package trafficfilterassocresource + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "strings" +) + +func (t trafficFilterAssocResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + idParts := strings.Split(request.ID, ",") + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + response.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: deployment_id,traffic_filter_id. Got: %q", request.ID), + ) + return + } + deploymentId := idParts[0] + trafficFilterId := idParts[1] + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("id"), fmt.Sprintf("%v-%v", deploymentId, trafficFilterId))...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("deployment_id"), deploymentId)...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("traffic_filter_id"), trafficFilterId)...) +} diff --git a/ec/ecresource/trafficfilterassocresource/read.go b/ec/ecresource/trafficfilterassocresource/read.go index 938ac7dd2..31a901b1e 100644 --- a/ec/ecresource/trafficfilterassocresource/read.go +++ b/ec/ecresource/trafficfilterassocresource/read.go @@ -19,35 +19,48 @@ package trafficfilterassocresource import ( "context" - - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/resource" ) -// read queries the remote deployment traffic filter ruleset association and -// updates the local state. 
-func read(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var client = meta.(*api.API) +func (t trafficFilterAssocResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var state modelV0 + + diags := request.State.Get(ctx, &state) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + res, err := trafficfilterapi.Get(trafficfilterapi.GetParams{ - API: client, - ID: d.Get("traffic_filter_id").(string), + API: t.provider.GetClient(), + ID: state.TrafficFilterID.Value, IncludeAssociations: true, }) if err != nil { if util.TrafficFilterNotFound(err) { - d.SetId("") - return nil + response.State.RemoveResource(ctx) + return } - return diag.FromErr(err) + response.Diagnostics.AddError(err.Error(), err.Error()) + return } - if err := flatten(res, d); err != nil { - return diag.FromErr(err) + if res == nil { + response.State.RemoveResource(ctx) + return } - return nil + var found bool + for _, assoc := range res.Associations { + if *assoc.EntityType == entityTypeDeployment && *assoc.ID == state.DeploymentID.Value { + found = true + } + } + + if !found { + response.State.RemoveResource(ctx) + return + } } diff --git a/ec/ecresource/trafficfilterassocresource/read_test.go b/ec/ecresource/trafficfilterassocresource/read_test.go deleted file mode 100644 index 4dd437ffa..000000000 --- a/ec/ecresource/trafficfilterassocresource/read_test.go +++ /dev/null @@ -1,114 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package trafficfilterassocresource - -import ( - "context" - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_read(t *testing.T) { - tc500Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilterAssociation(), - Schema: newSchema(), - }) - wantTC500 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilterAssociation(), - Schema: newSchema(), - }) - - tc404Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilterAssociation(), - Schema: newSchema(), - }) - wantTC404 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilterAssociation(), - Schema: newSchema(), - }) - wantTC404.SetId("") - type args struct { - ctx context.Context - d *schema.ResourceData - meta interface{} - } - tests := []struct { - name string - args args - want diag.Diagnostics - wantRD *schema.ResourceData - }{ - { - name: "returns an error when it receives a 500", - args: args{ - d: tc500Err, - meta: api.NewMock(mock.NewErrorResponse(500, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: diag.Diagnostics{ - { - Severity: diag.Error, - Summary: "api error: 1 error 
occurred:\n\t* some: message\n\n", - }, - }, - wantRD: wantTC500, - }, - { - name: "returns nil and unsets the state when the error is known", - args: args{ - d: tc404Err, - meta: api.NewMock(mock.NewErrorResponse(404, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: nil, - wantRD: wantTC404, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := read(tt.args.ctx, tt.args.d, tt.args.meta) - assert.Equal(t, tt.want, got) - var want interface{} - if tt.wantRD != nil { - if s := tt.wantRD.State(); s != nil { - want = s.Attributes - } - } - - var gotState interface{} - if s := tt.args.d.State(); s != nil { - gotState = s.Attributes - } - - assert.Equal(t, want, gotState) - }) - } -} diff --git a/ec/ecresource/trafficfilterassocresource/resource.go b/ec/ecresource/trafficfilterassocresource/resource.go deleted file mode 100644 index 4a402d894..000000000 --- a/ec/ecresource/trafficfilterassocresource/resource.go +++ /dev/null @@ -1,40 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package trafficfilterassocresource - -import ( - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// Resource returns the ec_deployment_traffic_filter_association resource schema. -func Resource() *schema.Resource { - return &schema.Resource{ - Description: "Elastic Cloud deployment traffic filtering association", - Schema: newSchema(), - - CreateContext: create, - ReadContext: read, - DeleteContext: delete, - - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(10 * time.Minute), - }, - } -} diff --git a/ec/ecresource/trafficfilterassocresource/resource_test.go b/ec/ecresource/trafficfilterassocresource/resource_test.go new file mode 100644 index 000000000..8b64425d3 --- /dev/null +++ b/ec/ecresource/trafficfilterassocresource/resource_test.go @@ -0,0 +1,324 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package trafficfilterassocresource_test + +import ( + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/terraform-provider-ec/ec" + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "net/url" + "regexp" + "testing" + + r "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestResourceTrafficFilterAssoc(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV5ProviderFactories: protoV5ProviderFactoriesWithMockClient( + api.NewMock( + createResponse(), + readResponse(), + readResponse(), + readResponse(), + readResponse(), + deleteResponse(), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: trafficFilterAssoc, + Check: checkResource(), + }, + { // Ensure that it can be successfully read + PlanOnly: true, + Config: trafficFilterAssoc, + Check: checkResource(), + }, + { // Delete resource + Destroy: true, + Config: trafficFilterAssoc, + }, + }, + }) +} + +func TestResourceTrafficFilterAssoc_externalDeletion1(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV5ProviderFactories: protoV5ProviderFactoriesWithMockClient( + api.NewMock( + createResponse(), + readResponse(), + readResponseAssociationDeleted(), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: trafficFilterAssoc, + Check: checkResource(), + }, + { // Ensure that it gets unset if deleted externally + PlanOnly: true, + ExpectNonEmptyPlan: true, + Config: trafficFilterAssoc, + Check: checkResourceDeleted(), + }, + }, + }) +} +func TestResourceTrafficFilterAssoc_externalDeletion2(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV5ProviderFactories: protoV5ProviderFactoriesWithMockClient( + api.NewMock( + createResponse(), + readResponse(), + readResponseTrafficFilterDeleted(), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: trafficFilterAssoc, + Check: checkResource(), + }, + { // Ensure that it gets 
unset if deleted externally + PlanOnly: true, + ExpectNonEmptyPlan: true, + Config: trafficFilterAssoc, + Check: checkResourceDeleted(), + }, + }, + }) +} + +func TestResourceTrafficFilterAssoc_gracefulDeletion(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV5ProviderFactories: protoV5ProviderFactoriesWithMockClient( + api.NewMock( + createResponse(), + readResponse(), + readResponse(), + alreadyDeletedResponse(), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: trafficFilterAssoc, + Check: checkResource(), + }, + { // Delete resource + Destroy: true, + Config: trafficFilterAssoc, + }, + }, + }) +} + +func TestResourceTrafficFilterAssoc_failedDeletion(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV5ProviderFactories: protoV5ProviderFactoriesWithMockClient( + api.NewMock( + createResponse(), + readResponse(), + readResponse(), + failedDeletionResponse(), + deleteResponse(), + ), + ), + Steps: []r.TestStep{ + { + Config: trafficFilterAssoc, + }, + { + Destroy: true, + Config: trafficFilterAssoc, + ExpectError: regexp.MustCompile(`internal.server.error: There was an internal server error`), + }, + }, + }) +} + +func TestResourceTrafficFilterAssoc_importState(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV5ProviderFactories: protoV5ProviderFactoriesWithMockClient( + api.NewMock( + readResponse(), + ), + ), + Steps: []r.TestStep{ + { + ImportState: true, + ImportStateId: "0a592ab2c5baf0fa95c77ac62135782e,9db94e68e2f040a19dfb664d0e83bc2a", + ResourceName: "ec_deployment_traffic_filter_association.test1", + Config: trafficFilterAssoc, + Check: checkResource(), + }, + }, + }) +} + +const trafficFilterAssoc = ` + resource "ec_deployment_traffic_filter_association" "test1" { + traffic_filter_id = "9db94e68e2f040a19dfb664d0e83bc2a" + deployment_id = "0a592ab2c5baf0fa95c77ac62135782e" + } +` + +func checkResource() r.TestCheckFunc { + return r.ComposeAggregateTestCheckFunc( + 
r.TestCheckResourceAttr("ec_deployment_traffic_filter_association.test1", "id", "0a592ab2c5baf0fa95c77ac62135782e-9db94e68e2f040a19dfb664d0e83bc2a"), + r.TestCheckResourceAttr("ec_deployment_traffic_filter_association.test1", "traffic_filter_id", "9db94e68e2f040a19dfb664d0e83bc2a"), + r.TestCheckResourceAttr("ec_deployment_traffic_filter_association.test1", "deployment_id", "0a592ab2c5baf0fa95c77ac62135782e"), + ) +} + +func checkResourceDeleted() r.TestCheckFunc { + return r.ComposeAggregateTestCheckFunc( + r.TestCheckNoResourceAttr("ec_deployment_traffic_filter_association.test1", "id"), + r.TestCheckNoResourceAttr("ec_deployment_traffic_filter_association.test1", "traffic_filter_id"), + r.TestCheckNoResourceAttr("ec_deployment_traffic_filter_association.test1", "deployment_id"), + ) +} + +func createResponse() mock.Response { + return mock.New200ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultWriteMockHeaders, + Method: "POST", + Path: "/api/v1/deployments/traffic-filter/rulesets/9db94e68e2f040a19dfb664d0e83bc2a/associations", + Query: url.Values{}, + Body: mock.NewStringBody(`{"entity_type":"deployment","id":"0a592ab2c5baf0fa95c77ac62135782e"}` + "\n"), + }, + mock.NewStringBody(`{"entity_type":"deployment","id":"0a592ab2c5baf0fa95c77ac62135782e"}`), + ) +} + +func readResponse() mock.Response { + return mock.New200ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultReadMockHeaders, + Method: "GET", + Path: "/api/v1/deployments/traffic-filter/rulesets/9db94e68e2f040a19dfb664d0e83bc2a", + Query: url.Values{ + "include_associations": []string{"true"}, + }, + }, + mock.NewStringBody(`{ + "id": "9db94e68e2f040a19dfb664d0e83bc2a", + "name": "dummy", + "type": "ip", + "include_by_default": false, + "region": "us-east-1", + "rules": [{"id": "6e4c8874f90d4793a2290f8199461952","source": "127.0.0.1"} ], + "associations": [{"entity_type": "deployment", "id": 
"0a592ab2c5baf0fa95c77ac62135782e"}], + "total_associations": 1 + }`, + ), + ) +} + +func readResponseAssociationDeleted() mock.Response { + return mock.New200ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultReadMockHeaders, + Method: "GET", + Path: "/api/v1/deployments/traffic-filter/rulesets/9db94e68e2f040a19dfb664d0e83bc2a", + Query: url.Values{ + "include_associations": []string{"true"}, + }, + }, + mock.NewStringBody(`{ + "id": "9db94e68e2f040a19dfb664d0e83bc2a", + "name": "dummy", + "type": "ip", + "include_by_default": false, + "region": "us-east-1", + "rules": [{"id": "6e4c8874f90d4793a2290f8199461952","source": "127.0.0.1"} ], + "associations": [{"entity_type": "deployment", "id": "some-unrelated-id"}], + "total_associations": 1 + }`, + ), + ) +} + +func readResponseTrafficFilterDeleted() mock.Response { + return mock.New404ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultReadMockHeaders, + Method: "GET", + Path: "/api/v1/deployments/traffic-filter/rulesets/9db94e68e2f040a19dfb664d0e83bc2a", + Query: url.Values{ + "include_associations": []string{"true"}, + }, + }, + mock.NewStringBody(`{ }`), + ) +} + +func deleteResponse() mock.Response { + return mock.New200ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultReadMockHeaders, + Method: "DELETE", + Path: "/api/v1/deployments/traffic-filter/rulesets/9db94e68e2f040a19dfb664d0e83bc2a/associations/deployment/0a592ab2c5baf0fa95c77ac62135782e", + Query: url.Values{}, + }, + mock.NewStringBody(`{}`), + ) +} + +func alreadyDeletedResponse() mock.Response { + return mock.New404ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultReadMockHeaders, + Method: "DELETE", + Path: "/api/v1/deployments/traffic-filter/rulesets/9db94e68e2f040a19dfb664d0e83bc2a/associations/deployment/0a592ab2c5baf0fa95c77ac62135782e", + Query: url.Values{}, + }, + 
mock.NewStringBody(`{ }`), + ) +} +func failedDeletionResponse() mock.Response { + mock.SampleInternalError() + return mock.New500ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultReadMockHeaders, + Method: "DELETE", + Path: "/api/v1/deployments/traffic-filter/rulesets/9db94e68e2f040a19dfb664d0e83bc2a/associations/deployment/0a592ab2c5baf0fa95c77ac62135782e", + Query: url.Values{}, + }, + mock.SampleInternalError().Response.Body, + ) +} + +func protoV5ProviderFactoriesWithMockClient(client *api.API) map[string]func() (tfprotov5.ProviderServer, error) { + return map[string]func() (tfprotov5.ProviderServer, error){ + "ec": func() (tfprotov5.ProviderServer, error) { + return providerserver.NewProtocol5(ec.ProviderWithClient(client, "unit-tests"))(), nil + }, + } +} diff --git a/ec/ecresource/trafficfilterassocresource/schema.go b/ec/ecresource/trafficfilterassocresource/schema.go index ec5e91d87..bd0460d84 100644 --- a/ec/ecresource/trafficfilterassocresource/schema.go +++ b/ec/ecresource/trafficfilterassocresource/schema.go @@ -18,23 +18,68 @@ package trafficfilterassocresource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "context" + "github.com/elastic/terraform-provider-ec/ec/internal" + "github.com/hashicorp/terraform-plugin-framework/diag" + tpfprovider "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -// newSchema returns the schema for an "ec_deployment_traffic_filter_association" resource. 
-func newSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "deployment_id": { - Type: schema.TypeString, - Description: `Required deployment ID where the traffic filter will be associated`, - Required: true, - ForceNew: true, - }, - "traffic_filter_id": { - Type: schema.TypeString, - Description: "Required traffic filter ruleset ID to tie to a deployment", - Required: true, - ForceNew: true, +// Ensure provider defined types fully satisfy framework interfaces +var _ tpfprovider.ResourceType = ResourceType{} +var _ resource.Resource = trafficFilterAssocResource{} + +var _ resource.ResourceWithImportState = trafficFilterAssocResource{} + +type ResourceType struct{} + +const entityTypeDeployment = "deployment" + +func (t ResourceType) GetSchema(_ context.Context) (tfsdk.Schema, diag.Diagnostics) { + return tfsdk.Schema{ + Attributes: map[string]tfsdk.Attribute{ + "deployment_id": { + Type: types.StringType, + Description: `Required deployment ID where the traffic filter will be associated`, + Required: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.RequiresReplace(), + }, + }, + "traffic_filter_id": { + Type: types.StringType, + Description: "Required traffic filter ruleset ID to tie to a deployment", + Required: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.RequiresReplace(), + }, + }, + // Computed attributes + "id": { + Type: types.StringType, + Computed: true, + MarkdownDescription: "Unique identifier of this resource.", + }, }, - } + }, nil +} + +func (t ResourceType) NewResource(_ context.Context, provider tpfprovider.Provider) (resource.Resource, diag.Diagnostics) { + p, diags := internal.ConvertProviderType(provider) + + return &trafficFilterAssocResource{ + provider: p, + }, diags +} + +type trafficFilterAssocResource struct { + provider internal.Provider +} + +type modelV0 struct { + ID types.String `tfsdk:"id"` + DeploymentID types.String `tfsdk:"deployment_id"` + TrafficFilterID types.String 
`tfsdk:"traffic_filter_id"` } diff --git a/ec/ecresource/trafficfilterassocresource/testutils.go b/ec/ecresource/trafficfilterassocresource/update.go similarity index 73% rename from ec/ecresource/trafficfilterassocresource/testutils.go rename to ec/ecresource/trafficfilterassocresource/update.go index b81f2f193..b5a2017c5 100644 --- a/ec/ecresource/trafficfilterassocresource/testutils.go +++ b/ec/ecresource/trafficfilterassocresource/update.go @@ -18,14 +18,10 @@ package trafficfilterassocresource import ( - "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "context" + "github.com/hashicorp/terraform-plugin-framework/resource" ) -var mockTrafficFilterID = "420b7b540dfc967a7a649c18e2fce4e4" - -func newSampleTrafficFilterAssociation() map[string]interface{} { - return map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "traffic_filter_id": mockTrafficFilterID, - } +func (t trafficFilterAssocResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + panic("ec_deployment_traffic_filter_association resources can not be updated!") } diff --git a/ec/ecresource/trafficfilterassocresource/flatteners.go b/ec/internal/flatteners/flatten_endpoint.go similarity index 57% rename from ec/ecresource/trafficfilterassocresource/flatteners.go rename to ec/internal/flatteners/flatten_endpoint.go index 5c419f4c8..3af63a361 100644 --- a/ec/ecresource/trafficfilterassocresource/flatteners.go +++ b/ec/internal/flatteners/flatten_endpoint.go @@ -15,35 +15,28 @@ // specific language governing permissions and limitations // under the License. 
-package trafficfilterassocresource +package flatteners import ( + "fmt" + "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func flatten(res *models.TrafficFilterRulesetInfo, d *schema.ResourceData) error { - if res == nil { - return nil +// FlattenClusterEndpoint receives a ClusterMetadataInfo, parses the http and +// https endpoints and returns a map with two keys: `http_endpoint` and +// `https_endpoint` +func FlattenEndpoints(metadata *models.ClusterMetadataInfo) (httpEndpoint string, httpsEndpoint string) { + if metadata == nil || metadata.Endpoint == "" || metadata.Ports == nil { + return } - var found bool - deploymentID := d.Get("deployment_id").(string) - for _, assoc := range res.Associations { - if *assoc.EntityType == entityType && *assoc.ID == deploymentID { - found = true - } + if metadata.Ports.HTTP != nil { + httpEndpoint = fmt.Sprintf("http://%s:%d", metadata.Endpoint, *metadata.Ports.HTTP) } - if !found { - if err := d.Set("deployment_id", ""); err != nil { - return err - } - if err := d.Set("traffic_filter_id", ""); err != nil { - return err - } - d.SetId("") + if metadata.Ports.HTTPS != nil { + httpsEndpoint = fmt.Sprintf("https://%s:%d", metadata.Endpoint, *metadata.Ports.HTTPS) } - - return nil + return } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_tags.go b/ec/internal/flatteners/flatten_tags.go similarity index 82% rename from ec/ecdatasource/deploymentdatasource/flatteners_tags.go rename to ec/internal/flatteners/flatten_tags.go index 1605b0dec..57d05c01e 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_tags.go +++ b/ec/internal/flatteners/flatten_tags.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentdatasource +package flatteners import ( "github.com/elastic/cloud-sdk-go/pkg/models" @@ -25,18 +25,12 @@ import ( // flattenTags takes in Deployment Metadata resource models and returns its // Tags in flattened form. -func flattenTags(metadata *models.DeploymentMetadata) types.Map { - - if metadata == nil || metadata.Tags == nil { - return types.Map{ElemType: types.StringType, Elems: map[string]attr.Value{}} - } - +func FlattenTags(metadataItems []*models.MetadataItem) types.Map { var tags = make(map[string]attr.Value) - for _, res := range metadata.Tags { + for _, res := range metadataItems { if res.Key != nil { tags[*res.Key] = types.String{Value: *res.Value} } } return types.Map{ElemType: types.StringType, Elems: tags} - } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_tags_test.go b/ec/internal/flatteners/flatten_tags_test.go similarity index 92% rename from ec/ecdatasource/deploymentdatasource/flatteners_tags_test.go rename to ec/internal/flatteners/flatten_tags_test.go index 6bc722462..9b28f6f85 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_tags_test.go +++ b/ec/internal/flatteners/flatten_tags_test.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentdatasource +package flatteners import ( "context" @@ -35,11 +35,6 @@ func TestFlattenTags(t *testing.T) { args args want map[string]string }{ - { - name: "flattens no metadata tags when empty", - args: args{}, - want: map[string]string{}, - }, { name: "flattens no metadata tags when empty", args: args{metadata: &models.DeploymentMetadata{}}, @@ -76,7 +71,7 @@ func TestFlattenTags(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := flattenTags(tt.args.metadata) + result := FlattenTags(tt.args.metadata.Tags) got := make(map[string]string, len(result.Elems)) result.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) diff --git a/ec/internal/planmodifier/default_from_env.go b/ec/internal/planmodifier/default_from_env.go index 04de4cc43..77b2fd3cc 100644 --- a/ec/internal/planmodifier/default_from_env.go +++ b/ec/internal/planmodifier/default_from_env.go @@ -1,3 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ package planmodifier import ( diff --git a/ec/internal/planmodifier/default_value.go b/ec/internal/planmodifier/default_value.go index 6f3b8e4b9..75ad85026 100644 --- a/ec/internal/planmodifier/default_value.go +++ b/ec/internal/planmodifier/default_value.go @@ -1,3 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + // NOTE! copied from terraform-provider-tls package planmodifier diff --git a/ec/internal/provider.go b/ec/internal/provider.go index b407dd1ca..ce0e3f6aa 100644 --- a/ec/internal/provider.go +++ b/ec/internal/provider.go @@ -18,7 +18,10 @@ package internal import ( + "fmt" + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/provider" ) @@ -27,3 +30,32 @@ type Provider interface { provider.Provider GetClient() *api.API } + +// ConvertProviderType is a helper function for NewResource and NewDataSource +// implementations to associate the concrete provider type. Alternatively, +// this helper can be skipped and the provider type can be directly type +// asserted (e.g. provider: in.(*provider)), however using this can prevent +// potential panics. 
+func ConvertProviderType(in provider.Provider) (Provider, diag.Diagnostics) { + var diags diag.Diagnostics + + p, ok := in.(Provider) + + if !ok { + diags.AddError( + "Unexpected Provider Instance Type", + fmt.Sprintf("While creating the data source or resource, an unexpected provider type (%T) was received. This is always a bug in the provider code and should be reported to the provider developers.", p), + ) + return p, diags + } + + if p == nil { + diags.AddError( + "Unexpected Provider Instance Type", + "While creating the data source or resource, an unexpected empty provider instance was received. This is always a bug in the provider code and should be reported to the provider developers.", + ) + return p, diags + } + + return p, diags +} diff --git a/ec/internal/validators/knownvalidator.go b/ec/internal/validators/knownvalidator.go index 82593e66c..129c93506 100644 --- a/ec/internal/validators/knownvalidator.go +++ b/ec/internal/validators/knownvalidator.go @@ -1,3 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ package validators import ( diff --git a/ec/internal/validators/urlvalidator.go b/ec/internal/validators/urlvalidator.go index a66f44d34..8de0dbc78 100644 --- a/ec/internal/validators/urlvalidator.go +++ b/ec/internal/validators/urlvalidator.go @@ -1,13 +1,31 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ package validators import ( "context" "fmt" + "net/url" + "strings" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" "golang.org/x/exp/slices" - "net/url" - "strings" ) type isURLWithSchemeValidator struct { @@ -79,5 +97,5 @@ func (v isURLWithSchemeValidator) Validate(ctx context.Context, req tfsdk.Valida } func IsURLWithSchemeValidator(validSchemes []string) tfsdk.AttributeValidator { - return isURLWithSchemeValidator{} + return isURLWithSchemeValidator{ValidSchemes: validSchemes} } diff --git a/ec/provider.go b/ec/provider.go index 917474531..c613f2c9d 100644 --- a/ec/provider.go +++ b/ec/provider.go @@ -20,6 +20,8 @@ package ec import ( "context" "fmt" + "time" + "github.com/elastic/terraform-provider-ec/ec/ecdatasource/deploymentdatasource" "github.com/elastic/terraform-provider-ec/ec/ecdatasource/deploymentsdatasource" "github.com/elastic/terraform-provider-ec/ec/ecdatasource/stackdatasource" @@ -34,7 +36,6 @@ import ( "github.com/elastic/terraform-provider-ec/ec/internal/validators" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "time" "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/hashicorp/terraform-plugin-framework/diag" @@ -74,11 +75,10 @@ func LegacyProvider() *schema.Provider { Schema: newSchema(), DataSourcesMap: map[string]*schema.Resource{}, ResourcesMap: map[string]*schema.Resource{ - "ec_deployment": deploymentresource.Resource(), - "ec_deployment_elasticsearch_keystore": elasticsearchkeystoreresource.Resource(), - "ec_deployment_traffic_filter": trafficfilterresource.Resource(), - "ec_deployment_traffic_filter_association": trafficfilterassocresource.Resource(), - "ec_deployment_extension": extensionresource.Resource(), + "ec_deployment": deploymentresource.Resource(), + "ec_deployment_elasticsearch_keystore": elasticsearchkeystoreresource.Resource(), + "ec_deployment_traffic_filter": 
trafficfilterresource.Resource(), + "ec_deployment_extension": extensionresource.Resource(), }, } } @@ -138,18 +138,23 @@ func newSchema() map[string]*schema.Schema { } } -func New() provider.Provider { - return &Provider{} +func New(version string) provider.Provider { + return &Provider{version: version} +} + +func ProviderWithClient(client *api.API, version string) provider.Provider { + return &Provider{client: client, version: version} } var _ internal.Provider = (*Provider)(nil) func (p *Provider) GetClient() *api.API { - return p.Client + return p.client } type Provider struct { - Client *api.API + version string + client *api.API } func (p *Provider) GetSchema(context.Context) (tfsdk.Schema, diag.Diagnostics) { @@ -223,6 +228,10 @@ type providerData struct { } func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, res *provider.ConfigureResponse) { + if p.client != nil { + return + } + // Retrieve provider data from configuration var config providerData diags := req.Config.Get(ctx, &config) @@ -336,7 +345,7 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, return } - p.Client, err = api.NewAPI(cfg) + p.client, err = api.NewAPI(cfg) if err != nil { res.Diagnostics.AddWarning( "Unable to create api Client config", @@ -347,7 +356,9 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, } func (p *Provider) GetResources(_ context.Context) (map[string]provider.ResourceType, diag.Diagnostics) { - return map[string]provider.ResourceType{}, nil + return map[string]provider.ResourceType{ + "ec_deployment_traffic_filter_association": trafficfilterassocresource.ResourceType{}, + }, nil } func (p *Provider) GetDataSources(_ context.Context) (map[string]provider.DataSourceType, diag.Diagnostics) { diff --git a/ec/provider_config.go b/ec/provider_config.go index acef22ef5..6e8969fd5 100644 --- a/ec/provider_config.go +++ b/ec/provider_config.go @@ -20,13 +20,14 @@ package ec import ( 
"context" "fmt" - "github.com/elastic/terraform-provider-ec/ec/internal/util" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "net/http" "os" "time" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/auth" ) @@ -129,7 +130,7 @@ func verboseSettings(name string, verbose, redactAuth bool) (api.VerboseSettings } return api.VerboseSettings{ - Verbose: true, + Verbose: verbose, RedactAuth: redactAuth, Device: f, }, nil diff --git a/examples/deployment_with_init/main.tf b/examples/deployment_with_init/main.tf index b381e5491..06597b49c 100644 --- a/examples/deployment_with_init/main.tf +++ b/examples/deployment_with_init/main.tf @@ -1,16 +1,10 @@ resource "null_resource" "bootstrap-elasticsearch" { provisioner "local-exec" { - command = data.template_file.elasticsearch-configuration.rendered - } -} - -data "template_file" "elasticsearch-configuration" { - template = file("es_config.sh") - depends_on = [ec_deployment.example_minimal] - vars = { # Created servers and appropriate AZs - elastic-user = ec_deployment.example_minimal.elasticsearch_username - elastic-password = ec_deployment.example_minimal.elasticsearch_password - es-url = ec_deployment.example_minimal.elasticsearch[0].https_endpoint + command = templatefile("es_config.sh", { + elastic-user = ec_deployment.example_minimal.elasticsearch_username + elastic-password = ec_deployment.example_minimal.elasticsearch_password + es-url = ec_deployment.example_minimal.elasticsearch[0].https_endpoint + }) } -} \ No newline at end of file +} diff --git a/go.mod b/go.mod index f2c63afd2..2eb44e9e5 100644 --- a/go.mod +++ b/go.mod @@ -3,12 +3,14 @@ module github.com/elastic/terraform-provider-ec go 1.19 require ( + github.com/blang/semver 
v3.5.1+incompatible github.com/blang/semver/v4 v4.0.0 github.com/elastic/cloud-sdk-go v1.10.0 github.com/go-openapi/runtime v0.24.1 github.com/go-openapi/strfmt v0.21.3 github.com/hashicorp/terraform-plugin-framework v0.11.1 github.com/hashicorp/terraform-plugin-go v0.14.0 + github.com/hashicorp/terraform-plugin-log v0.7.0 github.com/hashicorp/terraform-plugin-mux v0.7.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.21.0 github.com/stretchr/testify v1.8.0 @@ -46,7 +48,6 @@ require ( github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.17.2 // indirect github.com/hashicorp/terraform-json v0.14.0 // indirect - github.com/hashicorp/terraform-plugin-log v0.7.0 // indirect github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c // indirect github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect github.com/hashicorp/yamux v0.1.1 // indirect diff --git a/go.sum b/go.sum index d1e0d7ea1..c4c8cf567 100644 --- a/go.sum +++ b/go.sum @@ -31,6 +31,8 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:W github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= diff --git a/main.go b/main.go index 1e989d2fe..f265a1c85 100644 --- a/main.go 
+++ b/main.go @@ -20,10 +20,11 @@ package main import ( "context" "flag" - "github.com/hashicorp/terraform-plugin-framework/providerserver" "log" "github.com/elastic/terraform-provider-ec/ec" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" "github.com/hashicorp/terraform-plugin-go/tfprotov6" "github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server" "github.com/hashicorp/terraform-plugin-mux/tf5to6server" @@ -51,9 +52,7 @@ func main() { ctx := context.Background() providers := []func() tfprotov6.ProviderServer{ func() tfprotov6.ProviderServer { return upgradedSdkProvider }, - func() tfprotov6.ProviderServer { - return providerserver.NewProtocol6(ec.New())() - }, + providerserver.NewProtocol6(ec.New(ec.Version)), } muxServer, err := tf6muxserver.NewMuxServer(ctx, providers...) From 8a7997048d18307bd64a6f8058c707d4ba75b20b Mon Sep 17 00:00:00 2001 From: Pascal Hofmann Date: Thu, 15 Sep 2022 11:59:47 +0200 Subject: [PATCH 006/104] Update to terraform-plugin-framework 0.12 and fix import order --- ec/acc/acc_prereq.go | 1 + ec/acc/deployment_checks_test.go | 5 +- ec/acc/deployment_destroy_test.go | 3 +- ...ent_elasticsearch_kesytore_destroy_test.go | 3 +- .../deployment_elasticsearch_kesytore_test.go | 3 +- .../deployment_extension_bundle_file_test.go | 3 +- ec/acc/deployment_extension_destroy_test.go | 3 +- .../deployment_failed_upgrade_retry_test.go | 2 +- ec/acc/deployment_sweep_test.go | 3 +- .../deployment_traffic_filter_checks_test.go | 5 +- .../deployment_traffic_filter_destroy_test.go | 3 +- .../deployment_traffic_filter_sweep_test.go | 3 +- .../deployment_with_extension_bundle_test.go | 3 +- .../deploymentdatasource/datasource.go | 39 +++++++---- .../deploymentdatasource/datasource_test.go | 9 ++- .../deploymentdatasource/flatteners_apm.go | 4 +- .../flatteners_apm_test.go | 3 +- .../flatteners_elasticsearch.go | 3 +- .../flatteners_elasticsearch_test.go | 7 +- .../flatteners_enterprise_search.go | 4 +- 
.../flatteners_enterprise_search_test.go | 7 +- .../flatteners_integrations_server.go | 4 +- .../flatteners_integrations_server_test.go | 7 +- .../deploymentdatasource/flatteners_kibana.go | 4 +- .../flatteners_kibana_test.go | 7 +- .../flatteners_observability.go | 4 +- .../flatteners_observability_test.go | 5 +- .../flatteners_traffic_filter.go | 4 +- .../flatteners_traffic_filter_test.go | 3 +- .../deploymentdatasource/schema.go | 3 +- .../deploymentsdatasource/datasource.go | 44 +++++++----- .../deploymentsdatasource/datasource_test.go | 6 +- .../deploymentsdatasource/expanders.go | 6 +- .../deploymentsdatasource/expanders_test.go | 9 ++- .../deploymentsdatasource/schema.go | 6 +- ec/ecdatasource/stackdatasource/datasource.go | 47 ++++++++----- .../stackdatasource/datasource_test.go | 11 +-- .../stackdatasource/flatteners_apm.go | 4 +- .../stackdatasource/flatteners_apm_test.go | 6 +- .../flatteners_elasticsearch.go | 4 +- .../flatteners_elasticsearch_test.go | 6 +- .../flatteners_enterprise_search.go | 4 +- .../flatteners_enterprise_search_test.go | 6 +- .../stackdatasource/flatteners_kibana.go | 4 +- .../stackdatasource/flatteners_kibana_test.go | 6 +- ec/ecdatasource/stackdatasource/schema.go | 3 +- .../deploymentresource/apm_expanders_test.go | 3 +- .../deploymentresource/apm_flatteners_test.go | 3 +- ec/ecresource/deploymentresource/create.go | 5 +- ec/ecresource/deploymentresource/delete.go | 7 +- .../deploymentresource/delete_test.go | 8 ++- .../elasticsearch_expanders.go | 3 +- .../elasticsearch_expanders_test.go | 5 +- .../elasticsearch_flatteners.go | 3 +- .../elasticsearch_flatteners_test.go | 5 +- .../elasticsearch_remote_cluster_expanders.go | 3 +- ...ticsearch_remote_cluster_expanders_test.go | 5 +- .../enterprise_search_expanders_test.go | 3 +- .../enterprise_search_flatteners_test.go | 3 +- ec/ecresource/deploymentresource/expanders.go | 5 +- .../deploymentresource/expanders_test.go | 5 +- .../deploymentresource/flatteners.go | 5 +- 
.../deploymentresource/flatteners_test.go | 5 +- ec/ecresource/deploymentresource/import.go | 3 +- .../deploymentresource/import_test.go | 5 +- .../integrations_server_expanders_test.go | 3 +- .../integrations_server_flatteners_test.go | 3 +- .../kibana_expanders_test.go | 3 +- .../kibana_flatteners_test.go | 3 +- .../deploymentresource/observability_test.go | 3 +- ec/ecresource/deploymentresource/read.go | 5 +- ec/ecresource/deploymentresource/read_test.go | 10 +-- .../schema_elasticsearch.go | 3 +- .../stopped_resource_test.go | 3 +- .../deploymentresource/testutil_func.go | 3 +- .../deploymentresource/testutil_func_test.go | 3 +- .../deploymentresource/traffic_filter.go | 3 +- .../deploymentresource/traffic_filter_test.go | 3 +- ec/ecresource/deploymentresource/update.go | 5 +- .../deploymentresource/update_test.go | 3 +- .../update_traffic_rules.go | 3 +- .../elasticsearchkeystoreresource/create.go | 3 +- .../elasticsearchkeystoreresource/delete.go | 3 +- .../expanders.go | 3 +- .../expanders_test.go | 5 +- .../elasticsearchkeystoreresource/read.go | 5 +- .../read_test.go | 5 +- .../elasticsearchkeystoreresource/update.go | 3 +- ec/ecresource/extensionresource/create.go | 5 +- .../extensionresource/create_test.go | 8 ++- ec/ecresource/extensionresource/delete.go | 5 +- .../extensionresource/delete_test.go | 8 ++- ec/ecresource/extensionresource/read.go | 5 +- ec/ecresource/extensionresource/read_test.go | 8 +-- ec/ecresource/extensionresource/update.go | 5 +- .../extensionresource/update_test.go | 10 +-- ec/ecresource/extensionresource/upload.go | 3 +- .../trafficfilterassocresource/create.go | 18 ++++- .../trafficfilterassocresource/delete.go | 18 ++++- .../import_state.go | 5 +- .../trafficfilterassocresource/read.go | 19 ++++- .../resource_test.go | 12 ++-- .../trafficfilterassocresource/schema.go | 34 +++++---- .../trafficfilterassocresource/update.go | 3 +- ec/ecresource/trafficfilterresource/create.go | 5 +- ec/ecresource/trafficfilterresource/delete.go | 
5 +- .../trafficfilterresource/delete_test.go | 9 ++- .../trafficfilterresource/expanders.go | 3 +- .../trafficfilterresource/expanders_test.go | 5 +- .../trafficfilterresource/flatteners.go | 3 +- .../trafficfilterresource/flatteners_test.go | 5 +- ec/ecresource/trafficfilterresource/read.go | 5 +- .../trafficfilterresource/read_test.go | 8 ++- ec/ecresource/trafficfilterresource/update.go | 5 +- ec/internal/flatteners/flatten_tags.go | 3 +- ec/internal/flatteners/flatten_tags_test.go | 3 +- ec/internal/planmodifier/default_from_env.go | 5 +- ec/internal/provider.go | 39 ++++------- ec/internal/util/helpers.go | 5 +- ec/internal/util/helpers_test.go | 3 +- ec/internal/util/parsers_test.go | 3 +- ec/internal/util/testutils.go | 3 +- ec/internal/util/traffic_filter_err_test.go | 3 +- ec/internal/validators/knownvalidator.go | 1 + ec/provider.go | 69 +++++++++++-------- ec/provider_config.go | 3 +- ec/provider_config_test.go | 5 +- gen/gen.go | 1 + go.mod | 22 +++--- go.sum | 23 +++++++ 130 files changed, 579 insertions(+), 330 deletions(-) diff --git a/ec/acc/acc_prereq.go b/ec/acc/acc_prereq.go index e079cd6e0..516f07de9 100644 --- a/ec/acc/acc_prereq.go +++ b/ec/acc/acc_prereq.go @@ -29,6 +29,7 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/auth" + "github.com/elastic/terraform-provider-ec/ec" ) diff --git a/ec/acc/deployment_checks_test.go b/ec/acc/deployment_checks_test.go index 5ae87ad86..c4a4cac96 100644 --- a/ec/acc/deployment_checks_test.go +++ b/ec/acc/deployment_checks_test.go @@ -20,11 +20,12 @@ package acc import ( "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deputil" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - 
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func testAccCheckDeploymentExists(name string) resource.TestCheckFunc { diff --git a/ec/acc/deployment_destroy_test.go b/ec/acc/deployment_destroy_test.go index e3817a613..5b70f5eff 100644 --- a/ec/acc/deployment_destroy_test.go +++ b/ec/acc/deployment_destroy_test.go @@ -20,9 +20,10 @@ package acc import ( "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func testAccDeploymentDestroy(s *terraform.State) error { diff --git a/ec/acc/deployment_elasticsearch_kesytore_destroy_test.go b/ec/acc/deployment_elasticsearch_kesytore_destroy_test.go index 6a4332355..6dea775b3 100644 --- a/ec/acc/deployment_elasticsearch_kesytore_destroy_test.go +++ b/ec/acc/deployment_elasticsearch_kesytore_destroy_test.go @@ -20,9 +20,10 @@ package acc import ( "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/eskeystoreapi" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func testAccDeploymentElasticsearchKeystoreDestroy(s *terraform.State) error { diff --git a/ec/acc/deployment_elasticsearch_kesytore_test.go b/ec/acc/deployment_elasticsearch_kesytore_test.go index f6d557daa..01a7f9207 100644 --- a/ec/acc/deployment_elasticsearch_kesytore_test.go +++ b/ec/acc/deployment_elasticsearch_kesytore_test.go @@ -21,10 +21,11 @@ import ( "fmt" "testing" - "github.com/elastic/cloud-sdk-go/pkg/multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/elastic/cloud-sdk-go/pkg/multierror" ) func TestAccDeploymentElasticsearchKeystore_full(t *testing.T) { diff --git 
a/ec/acc/deployment_extension_bundle_file_test.go b/ec/acc/deployment_extension_bundle_file_test.go index 8c7a267f3..25f8cc789 100644 --- a/ec/acc/deployment_extension_bundle_file_test.go +++ b/ec/acc/deployment_extension_bundle_file_test.go @@ -27,10 +27,11 @@ import ( "path/filepath" "testing" - "github.com/elastic/cloud-sdk-go/pkg/client/extensions" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/elastic/cloud-sdk-go/pkg/client/extensions" ) func TestAccDeploymentExtension_bundleFile(t *testing.T) { diff --git a/ec/acc/deployment_extension_destroy_test.go b/ec/acc/deployment_extension_destroy_test.go index 140e9f153..a715b506b 100644 --- a/ec/acc/deployment_extension_destroy_test.go +++ b/ec/acc/deployment_extension_destroy_test.go @@ -20,9 +20,10 @@ package acc import ( "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/elastic/cloud-sdk-go/pkg/api/apierror" "github.com/elastic/cloud-sdk-go/pkg/client/extensions" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func testAccExtensionDestroy(s *terraform.State) error { diff --git a/ec/acc/deployment_failed_upgrade_retry_test.go b/ec/acc/deployment_failed_upgrade_retry_test.go index df098b665..49d4d0505 100644 --- a/ec/acc/deployment_failed_upgrade_retry_test.go +++ b/ec/acc/deployment_failed_upgrade_retry_test.go @@ -23,7 +23,7 @@ import ( "regexp" "testing" - semver "github.com/blang/semver/v4" + "github.com/blang/semver/v4" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) diff --git a/ec/acc/deployment_sweep_test.go b/ec/acc/deployment_sweep_test.go index 01ab91e65..7909ea688 100644 --- a/ec/acc/deployment_sweep_test.go +++ b/ec/acc/deployment_sweep_test.go @@ -23,6 +23,8 @@ import ( "sync" "time" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/models" @@ -30,7 +32,6 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/plan" "github.com/elastic/cloud-sdk-go/pkg/plan/planutil" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func init() { diff --git a/ec/acc/deployment_traffic_filter_checks_test.go b/ec/acc/deployment_traffic_filter_checks_test.go index 5170283da..1e15360d6 100644 --- a/ec/acc/deployment_traffic_filter_checks_test.go +++ b/ec/acc/deployment_traffic_filter_checks_test.go @@ -20,10 +20,11 @@ package acc import ( "fmt" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" ) func testAccCheckDeploymentTrafficFilterExists(name string) resource.TestCheckFunc { diff --git a/ec/acc/deployment_traffic_filter_destroy_test.go b/ec/acc/deployment_traffic_filter_destroy_test.go index 2297c1888..a9568c0ac 100644 --- a/ec/acc/deployment_traffic_filter_destroy_test.go +++ b/ec/acc/deployment_traffic_filter_destroy_test.go @@ -20,9 +20,10 @@ package acc import ( "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func testAccDeploymentTrafficFilterDestroy(s *terraform.State) error { diff --git a/ec/acc/deployment_traffic_filter_sweep_test.go b/ec/acc/deployment_traffic_filter_sweep_test.go index eac607b54..54bd5241d 100644 --- 
a/ec/acc/deployment_traffic_filter_sweep_test.go +++ b/ec/acc/deployment_traffic_filter_sweep_test.go @@ -21,10 +21,11 @@ import ( "strings" "sync" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func init() { diff --git a/ec/acc/deployment_with_extension_bundle_test.go b/ec/acc/deployment_with_extension_bundle_test.go index 5c632c9b9..203994f1e 100644 --- a/ec/acc/deployment_with_extension_bundle_test.go +++ b/ec/acc/deployment_with_extension_bundle_test.go @@ -23,10 +23,11 @@ import ( "path/filepath" "testing" - "github.com/elastic/cloud-sdk-go/pkg/multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/elastic/cloud-sdk-go/pkg/multierror" ) func TestAccDeployment_withExtension(t *testing.T) { diff --git a/ec/ecdatasource/deploymentdatasource/datasource.go b/ec/ecdatasource/deploymentdatasource/datasource.go index 1239f6041..ec5e5b396 100644 --- a/ec/ecdatasource/deploymentdatasource/datasource.go +++ b/ec/ecdatasource/deploymentdatasource/datasource.go @@ -23,9 +23,9 @@ import ( "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deputil" "github.com/elastic/cloud-sdk-go/pkg/models" @@ -35,25 +35,36 @@ import ( "github.com/elastic/terraform-provider-ec/ec/internal/util" ) -var _ provider.DataSourceType = (*DataSourceType)(nil) +var _ 
datasource.DataSource = &DataSource{} +var _ datasource.DataSourceWithConfigure = &DataSource{} +var _ datasource.DataSourceWithGetSchema = &DataSource{} +var _ datasource.DataSourceWithMetadata = &DataSource{} -type DataSourceType struct{} +type DataSource struct { + client *api.API +} -func (s DataSourceType) NewDataSource(ctx context.Context, in provider.Provider) (datasource.DataSource, diag.Diagnostics) { - p, diags := internal.ConvertProviderType(in) +func (d *DataSource) Configure(ctx context.Context, request datasource.ConfigureRequest, response *datasource.ConfigureResponse) { + client, diags := internal.ConvertProviderData(request.ProviderData) + response.Diagnostics.Append(diags...) + d.client = client +} - return &deploymentDataSource{ - p: p, - }, diags +func (d *DataSource) Metadata(ctx context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) { + response.TypeName = request.ProviderTypeName + "_deployment" } -var _ datasource.DataSource = (*deploymentDataSource)(nil) +func (d DataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { + // Prevent panic if the provider has not been configured. + if d.client == nil { + response.Diagnostics.AddError( + "Unconfigured API Client", + "Expected configured API client. Please report this issue to the provider developers.", + ) -type deploymentDataSource struct { - p internal.Provider -} + return + } -func (d deploymentDataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { var newState modelV0 response.Diagnostics.Append(request.Config.Get(ctx, &newState)...) 
if response.Diagnostics.HasError() { @@ -61,7 +72,7 @@ func (d deploymentDataSource) Read(ctx context.Context, request datasource.ReadR } res, err := deploymentapi.Get(deploymentapi.GetParams{ - API: d.p.GetClient(), + API: d.client, DeploymentID: newState.ID.Value, QueryParams: deputil.QueryParams{ ShowPlans: true, diff --git a/ec/ecdatasource/deploymentdatasource/datasource_test.go b/ec/ecdatasource/deploymentdatasource/datasource_test.go index 2126b708d..12ea29998 100644 --- a/ec/ecdatasource/deploymentdatasource/datasource_test.go +++ b/ec/ecdatasource/deploymentdatasource/datasource_test.go @@ -19,15 +19,18 @@ package deploymentdatasource import ( "context" - "github.com/elastic/terraform-provider-ec/ec/internal/util" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" - "testing" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_modelToState(t *testing.T) { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_apm.go b/ec/ecdatasource/deploymentdatasource/flatteners_apm.go index 15ad5aaf9..cb5c8c1df 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_apm.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_apm.go @@ -19,11 +19,13 @@ package deploymentdatasource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go 
b/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go index 570d696a3..74d4ad6e6 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go @@ -19,10 +19,11 @@ package deploymentdatasource import ( "context" + "testing" + "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" - "testing" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go index e34060acc..a53a55115 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go @@ -23,11 +23,12 @@ import ( "fmt" "strconv" - "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go index 3b8fcee59..cc82267b8 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go @@ -21,12 +21,13 @@ import ( "context" "testing" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + 
"github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) func Test_flattenElasticsearchResources(t *testing.T) { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go index 1fbbb6a59..57236deca 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go @@ -19,11 +19,13 @@ package deploymentdatasource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go index 64baf24e8..ee52d1917 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go @@ -21,12 +21,13 @@ import ( "context" "testing" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) func Test_flattenEnterpriseSearchResource(t *testing.T) { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go index 57b8ec910..c66a9a829 100644 --- 
a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go @@ -19,11 +19,13 @@ package deploymentdatasource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go index 7fff8dbff..59afa71dd 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go @@ -21,12 +21,13 @@ import ( "context" "testing" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) func Test_flattenIntegrationsServerResource(t *testing.T) { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go b/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go index 3c27f808f..1c7bcc46f 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go @@ -19,11 +19,13 @@ package deploymentdatasource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" 
"github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go index 2faea9f6a..97b52cc24 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go @@ -21,12 +21,13 @@ import ( "context" "testing" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) func Test_flattenKibanaResources(t *testing.T) { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_observability.go b/ec/ecdatasource/deploymentdatasource/flatteners_observability.go index 507ba7607..f00735f15 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_observability.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_observability.go @@ -19,10 +19,12 @@ package deploymentdatasource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/cloud-sdk-go/pkg/models" ) // flattenObservability parses a deployment's observability settings. 
diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go index 4da04c60c..2e7330655 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go @@ -21,10 +21,11 @@ import ( "context" "testing" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" ) func TestFlattenObservability(t *testing.T) { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go index 51851accc..eb16bb770 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go @@ -19,10 +19,12 @@ package deploymentdatasource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/cloud-sdk-go/pkg/models" ) // flattenTrafficFiltering parses a deployment's traffic filtering settings. 
diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go index aa7a81d33..9cfc22c2f 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go @@ -21,8 +21,9 @@ import ( "context" "testing" - "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/models" ) func Test_flattenTrafficFiltering(t *testing.T) { diff --git a/ec/ecdatasource/deploymentdatasource/schema.go b/ec/ecdatasource/deploymentdatasource/schema.go index 53d2fc73d..f77b0f1b4 100644 --- a/ec/ecdatasource/deploymentdatasource/schema.go +++ b/ec/ecdatasource/deploymentdatasource/schema.go @@ -19,12 +19,13 @@ package deploymentdatasource import ( "context" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" ) -func (s DataSourceType) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { +func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { return tfsdk.Schema{ Attributes: map[string]tfsdk.Attribute{ "alias": { diff --git a/ec/ecdatasource/deploymentsdatasource/datasource.go b/ec/ecdatasource/deploymentsdatasource/datasource.go index 7f4b6b64c..08139b485 100644 --- a/ec/ecdatasource/deploymentsdatasource/datasource.go +++ b/ec/ecdatasource/deploymentsdatasource/datasource.go @@ -22,37 +22,49 @@ import ( "fmt" "strconv" - "github.com/elastic/terraform-provider-ec/ec/internal" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/elastic/terraform-provider-ec/ec/internal" ) -var _ provider.DataSourceType = (*DataSourceType)(nil) +var _ datasource.DataSource = &DataSource{} +var _ datasource.DataSourceWithConfigure = &DataSource{} +var _ datasource.DataSourceWithGetSchema = &DataSource{} +var _ datasource.DataSourceWithMetadata = &DataSource{} -type DataSourceType struct{} +type DataSource struct { + client *api.API +} -func (s DataSourceType) NewDataSource(ctx context.Context, in provider.Provider) (datasource.DataSource, diag.Diagnostics) { - p, diags := internal.ConvertProviderType(in) +func (d *DataSource) Configure(ctx context.Context, request datasource.ConfigureRequest, response *datasource.ConfigureResponse) { + client, diags := internal.ConvertProviderData(request.ProviderData) + response.Diagnostics.Append(diags...) + d.client = client +} - return &deploymentsDataSource{ - p: p, - }, diags +func (d *DataSource) Metadata(ctx context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) { + response.TypeName = request.ProviderTypeName + "_deployments" } -var _ datasource.DataSource = (*deploymentsDataSource)(nil) +func (d DataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { + // Prevent panic if the provider has not been configured. + if d.client == nil { + response.Diagnostics.AddError( + "Unconfigured API Client", + "Expected configured API client. 
Please report this issue to the provider developers.", + ) -type deploymentsDataSource struct { - p internal.Provider -} + return + } -func (d deploymentsDataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { var newState modelV0 response.Diagnostics.Append(request.Config.Get(ctx, &newState)...) if response.Diagnostics.HasError() { @@ -66,7 +78,7 @@ func (d deploymentsDataSource) Read(ctx context.Context, request datasource.Read } res, err := deploymentapi.Search(deploymentapi.SearchParams{ - API: d.p.GetClient(), + API: d.client, Request: query, }) if err != nil { diff --git a/ec/ecdatasource/deploymentsdatasource/datasource_test.go b/ec/ecdatasource/deploymentsdatasource/datasource_test.go index 25eaa0f78..23b4e2db4 100644 --- a/ec/ecdatasource/deploymentsdatasource/datasource_test.go +++ b/ec/ecdatasource/deploymentsdatasource/datasource_test.go @@ -19,13 +19,15 @@ package deploymentsdatasource import ( "context" + "testing" + "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" - "testing" + + "github.com/stretchr/testify/assert" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_modelToState(t *testing.T) { diff --git a/ec/ecdatasource/deploymentsdatasource/expanders.go b/ec/ecdatasource/deploymentsdatasource/expanders.go index 96f401c5c..635883ab8 100644 --- a/ec/ecdatasource/deploymentsdatasource/expanders.go +++ b/ec/ecdatasource/deploymentsdatasource/expanders.go @@ -20,11 +20,13 @@ package deploymentsdatasource import ( "context" "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-framework/diag" - 
"github.com/hashicorp/terraform-plugin-framework/types" ) // expandFilters expands all filters into a search request model diff --git a/ec/ecdatasource/deploymentsdatasource/expanders_test.go b/ec/ecdatasource/deploymentsdatasource/expanders_test.go index 6727be188..8ddb20da5 100644 --- a/ec/ecdatasource/deploymentsdatasource/expanders_test.go +++ b/ec/ecdatasource/deploymentsdatasource/expanders_test.go @@ -20,15 +20,18 @@ package deploymentsdatasource import ( "context" "encoding/json" - "github.com/elastic/terraform-provider-ec/ec/internal/util" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" - "testing" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_expandFilters(t *testing.T) { diff --git a/ec/ecdatasource/deploymentsdatasource/schema.go b/ec/ecdatasource/deploymentsdatasource/schema.go index 7275db68b..c633e0f98 100644 --- a/ec/ecdatasource/deploymentsdatasource/schema.go +++ b/ec/ecdatasource/deploymentsdatasource/schema.go @@ -19,14 +19,16 @@ package deploymentsdatasource import ( "context" - "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" ) -func (s DataSourceType) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { +func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { return tfsdk.Schema{ Attributes: map[string]tfsdk.Attribute{ "name_prefix": { diff --git 
a/ec/ecdatasource/stackdatasource/datasource.go b/ec/ecdatasource/stackdatasource/datasource.go index 200c72f71..22a312ac3 100644 --- a/ec/ecdatasource/stackdatasource/datasource.go +++ b/ec/ecdatasource/stackdatasource/datasource.go @@ -22,35 +22,48 @@ import ( "fmt" "regexp" - "github.com/elastic/cloud-sdk-go/pkg/api/stackapi" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/terraform-provider-ec/ec/internal" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" -) -var _ provider.DataSourceType = (*DataSourceType)(nil) + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/stackapi" + "github.com/elastic/cloud-sdk-go/pkg/models" -type DataSourceType struct{} + "github.com/elastic/terraform-provider-ec/ec/internal" +) -func (s DataSourceType) NewDataSource(ctx context.Context, in provider.Provider) (datasource.DataSource, diag.Diagnostics) { - p, diags := internal.ConvertProviderType(in) +var _ datasource.DataSource = &DataSource{} +var _ datasource.DataSourceWithConfigure = &DataSource{} +var _ datasource.DataSourceWithGetSchema = &DataSource{} +var _ datasource.DataSourceWithMetadata = &DataSource{} - return &stackDataSource{ - p: p, - }, diags +type DataSource struct { + client *api.API } -var _ datasource.DataSource = (*stackDataSource)(nil) +func (d *DataSource) Configure(ctx context.Context, request datasource.ConfigureRequest, response *datasource.ConfigureResponse) { + client, diags := internal.ConvertProviderData(request.ProviderData) + response.Diagnostics.Append(diags...) 
+ d.client = client +} -type stackDataSource struct { - p internal.Provider +func (d *DataSource) Metadata(ctx context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) { + response.TypeName = request.ProviderTypeName + "_stack" } -func (d stackDataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { +func (d DataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { + // Prevent panic if the provider has not been configured. + if d.client == nil { + response.Diagnostics.AddError( + "Unconfigured API Client", + "Expected configured API client. Please report this issue to the provider developers.", + ) + + return + } + var newState modelV0 response.Diagnostics.Append(request.Config.Get(ctx, &newState)...) if response.Diagnostics.HasError() { @@ -58,7 +71,7 @@ func (d stackDataSource) Read(ctx context.Context, request datasource.ReadReques } res, err := stackapi.List(stackapi.ListParams{ - API: d.p.GetClient(), + API: d.client, Region: newState.Region.Value, }) if err != nil { diff --git a/ec/ecdatasource/stackdatasource/datasource_test.go b/ec/ecdatasource/stackdatasource/datasource_test.go index f031550ef..23755534e 100644 --- a/ec/ecdatasource/stackdatasource/datasource_test.go +++ b/ec/ecdatasource/stackdatasource/datasource_test.go @@ -21,15 +21,18 @@ import ( "context" "errors" "fmt" - "github.com/elastic/terraform-provider-ec/ec/internal/util" - "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/types" "regexp/syntax" "testing" + "github.com/stretchr/testify/assert" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) 
func Test_modelToState(t *testing.T) { diff --git a/ec/ecdatasource/stackdatasource/flatteners_apm.go b/ec/ecdatasource/stackdatasource/flatteners_apm.go index 68b3b9f78..194017cbf 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_apm.go +++ b/ec/ecdatasource/stackdatasource/flatteners_apm.go @@ -19,10 +19,12 @@ package stackdatasource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/cloud-sdk-go/pkg/models" ) // flattenStackVersionApmConfig takes a StackVersionApmConfigs and flattens it. diff --git a/ec/ecdatasource/stackdatasource/flatteners_apm_test.go b/ec/ecdatasource/stackdatasource/flatteners_apm_test.go index e12515e61..b55ae794a 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_apm_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_apm_test.go @@ -19,13 +19,15 @@ package stackdatasource import ( "context" - "github.com/elastic/terraform-provider-ec/ec/internal/util" + "testing" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" - "testing" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenApmResource(t *testing.T) { diff --git a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go index 39e2908a3..15026a434 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go +++ b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go @@ -19,10 +19,12 @@ package stackdatasource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" 
"github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/cloud-sdk-go/pkg/models" ) // flattenStackVersionElasticsearchConfig takes a StackVersionElasticsearchConfig and flattens it. diff --git a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go index 6643db967..75a6b95f5 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go @@ -19,13 +19,15 @@ package stackdatasource import ( "context" - "github.com/elastic/terraform-provider-ec/ec/internal/util" + "testing" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" - "testing" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenElasticsearchResource(t *testing.T) { diff --git a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go index fb34b449b..614ad8e17 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go +++ b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go @@ -19,10 +19,12 @@ package stackdatasource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/cloud-sdk-go/pkg/models" ) // flattenStackVersionEnterpriseSearchConfig takes a StackVersionEnterpriseSearchConfig and flattens it. 
diff --git a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go index a781c3284..426ee8b5e 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go @@ -19,13 +19,15 @@ package stackdatasource import ( "context" - "github.com/elastic/terraform-provider-ec/ec/internal/util" + "testing" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" - "testing" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenEnterpriseSearchResources(t *testing.T) { diff --git a/ec/ecdatasource/stackdatasource/flatteners_kibana.go b/ec/ecdatasource/stackdatasource/flatteners_kibana.go index 63dfe6680..31060b7f8 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_kibana.go +++ b/ec/ecdatasource/stackdatasource/flatteners_kibana.go @@ -19,10 +19,12 @@ package stackdatasource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/cloud-sdk-go/pkg/models" ) // flattenStackVersionKibanaConfig takes a StackVersionKibanaConfig and flattens it. 
diff --git a/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go b/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go index b9ca34ec8..78083a172 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go @@ -19,13 +19,15 @@ package stackdatasource import ( "context" - "github.com/elastic/terraform-provider-ec/ec/internal/util" + "testing" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" - "testing" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenKibanaResources(t *testing.T) { diff --git a/ec/ecdatasource/stackdatasource/schema.go b/ec/ecdatasource/stackdatasource/schema.go index df15bc0dd..9be8f45d9 100644 --- a/ec/ecdatasource/stackdatasource/schema.go +++ b/ec/ecdatasource/stackdatasource/schema.go @@ -19,13 +19,14 @@ package stackdatasource import ( "context" + "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" ) -func (s DataSourceType) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { +func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { return tfsdk.Schema{ Attributes: map[string]tfsdk.Attribute{ "version_regex": { diff --git a/ec/ecresource/deploymentresource/apm_expanders_test.go b/ec/ecresource/deploymentresource/apm_expanders_test.go index f77d83982..4ecefabe7 100644 --- a/ec/ecresource/deploymentresource/apm_expanders_test.go +++ b/ec/ecresource/deploymentresource/apm_expanders_test.go @@ -21,10 +21,11 @@ import ( "errors" "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" 
"github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_expandApmResources(t *testing.T) { diff --git a/ec/ecresource/deploymentresource/apm_flatteners_test.go b/ec/ecresource/deploymentresource/apm_flatteners_test.go index 68a07428a..94441116b 100644 --- a/ec/ecresource/deploymentresource/apm_flatteners_test.go +++ b/ec/ecresource/deploymentresource/apm_flatteners_test.go @@ -20,10 +20,11 @@ package deploymentresource import ( "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_flattenApmResource(t *testing.T) { diff --git a/ec/ecresource/deploymentresource/create.go b/ec/ecresource/deploymentresource/create.go index e8690d32c..26dfc6377 100644 --- a/ec/ecresource/deploymentresource/create.go +++ b/ec/ecresource/deploymentresource/create.go @@ -21,11 +21,12 @@ import ( "context" "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // createResource will createResource a new deployment from the specified settings. 
diff --git a/ec/ecresource/deploymentresource/delete.go b/ec/ecresource/deploymentresource/delete.go index 8a8a63c97..8ee3c4253 100644 --- a/ec/ecresource/deploymentresource/delete.go +++ b/ec/ecresource/deploymentresource/delete.go @@ -22,13 +22,14 @@ import ( "errors" "strings" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/client/deployments" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // Delete shuts down and deletes the remote deployment retrying up to 3 times diff --git a/ec/ecresource/deploymentresource/delete_test.go b/ec/ecresource/deploymentresource/delete_test.go index 7dba42c31..b06b1cd0e 100644 --- a/ec/ecresource/deploymentresource/delete_test.go +++ b/ec/ecresource/deploymentresource/delete_test.go @@ -22,12 +22,14 @@ import ( "errors" "testing" + "github.com/stretchr/testify/assert" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/deploymentresource/elasticsearch_expanders.go b/ec/ecresource/deploymentresource/elasticsearch_expanders.go index 67c59ef3d..6b3854042 100644 --- a/ec/ecresource/deploymentresource/elasticsearch_expanders.go +++ 
b/ec/ecresource/deploymentresource/elasticsearch_expanders.go @@ -24,10 +24,11 @@ import ( "strconv" "strings" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deploymentsize" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/deploymentresource/elasticsearch_expanders_test.go b/ec/ecresource/deploymentresource/elasticsearch_expanders_test.go index b30edf8c8..e2f966947 100644 --- a/ec/ecresource/deploymentresource/elasticsearch_expanders_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch_expanders_test.go @@ -21,11 +21,12 @@ import ( "errors" "testing" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" ) func Test_expandEsResource(t *testing.T) { diff --git a/ec/ecresource/deploymentresource/elasticsearch_flatteners.go b/ec/ecresource/deploymentresource/elasticsearch_flatteners.go index 512edc7d5..538caf172 100644 --- a/ec/ecresource/deploymentresource/elasticsearch_flatteners.go +++ b/ec/ecresource/deploymentresource/elasticsearch_flatteners.go @@ -24,9 +24,10 @@ import ( "sort" "strconv" - "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/deploymentresource/elasticsearch_flatteners_test.go b/ec/ecresource/deploymentresource/elasticsearch_flatteners_test.go index 33302f76c..2d0f0c992 100644 --- 
a/ec/ecresource/deploymentresource/elasticsearch_flatteners_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch_flatteners_test.go @@ -20,11 +20,12 @@ package deploymentresource import ( "testing" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" ) func Test_flattenEsResource(t *testing.T) { diff --git a/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders.go b/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders.go index be2f2535a..7d01f7782 100644 --- a/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders.go +++ b/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders.go @@ -18,11 +18,12 @@ package deploymentresource import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/esremoteclustersapi" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func handleRemoteClusters(d *schema.ResourceData, client *api.API) error { diff --git a/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders_test.go b/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders_test.go index a79adaaee..c069aa03b 100644 --- a/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders_test.go @@ -20,12 +20,13 @@ package deploymentresource import ( "testing" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + 
"github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/deploymentresource/enterprise_search_expanders_test.go b/ec/ecresource/deploymentresource/enterprise_search_expanders_test.go index 3385c6526..b70375a4f 100644 --- a/ec/ecresource/deploymentresource/enterprise_search_expanders_test.go +++ b/ec/ecresource/deploymentresource/enterprise_search_expanders_test.go @@ -21,10 +21,11 @@ import ( "errors" "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_expandEssResources(t *testing.T) { diff --git a/ec/ecresource/deploymentresource/enterprise_search_flatteners_test.go b/ec/ecresource/deploymentresource/enterprise_search_flatteners_test.go index eb70eb6c7..69d725de7 100644 --- a/ec/ecresource/deploymentresource/enterprise_search_flatteners_test.go +++ b/ec/ecresource/deploymentresource/enterprise_search_flatteners_test.go @@ -20,10 +20,11 @@ package deploymentresource import ( "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_flattenEssResource(t *testing.T) { diff --git a/ec/ecresource/deploymentresource/expanders.go b/ec/ecresource/deploymentresource/expanders.go index 1f4048179..108fb7e9d 100644 --- a/ec/ecresource/deploymentresource/expanders.go +++ b/ec/ecresource/deploymentresource/expanders.go @@ -21,13 +21,14 @@ import ( "fmt" "sort" - semver "github.com/blang/semver/v4" + 
"github.com/blang/semver/v4" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deptemplateapi" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/multierror" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/deploymentresource/expanders_test.go b/ec/ecresource/deploymentresource/expanders_test.go index 9e60fe7ee..51d07c2f6 100644 --- a/ec/ecresource/deploymentresource/expanders_test.go +++ b/ec/ecresource/deploymentresource/expanders_test.go @@ -24,13 +24,14 @@ import ( "os" "testing" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/multierror" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/deploymentresource/flatteners.go b/ec/ecresource/deploymentresource/flatteners.go index 86c6cbaaf..91aa467d0 100644 --- a/ec/ecresource/deploymentresource/flatteners.go +++ b/ec/ecresource/deploymentresource/flatteners.go @@ -22,10 +22,11 @@ import ( "fmt" "strings" - semver "github.com/blang/semver/v4" + "github.com/blang/semver/v4" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/deploymentresource/flatteners_test.go 
b/ec/ecresource/deploymentresource/flatteners_test.go index 8b4400fb8..b268cd67a 100644 --- a/ec/ecresource/deploymentresource/flatteners_test.go +++ b/ec/ecresource/deploymentresource/flatteners_test.go @@ -21,11 +21,12 @@ import ( "errors" "testing" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/deploymentresource/import.go b/ec/ecresource/deploymentresource/import.go index f685cfa9d..a12a7762e 100644 --- a/ec/ecresource/deploymentresource/import.go +++ b/ec/ecresource/deploymentresource/import.go @@ -22,7 +22,8 @@ import ( "errors" "fmt" - semver "github.com/blang/semver/v4" + "github.com/blang/semver/v4" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deputil" diff --git a/ec/ecresource/deploymentresource/import_test.go b/ec/ecresource/deploymentresource/import_test.go index f8e631ff0..6766e7ebb 100644 --- a/ec/ecresource/deploymentresource/import_test.go +++ b/ec/ecresource/deploymentresource/import_test.go @@ -22,11 +22,12 @@ import ( "errors" "testing" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/deploymentresource/integrations_server_expanders_test.go 
b/ec/ecresource/deploymentresource/integrations_server_expanders_test.go index 90f3acdf3..76d2b84b2 100644 --- a/ec/ecresource/deploymentresource/integrations_server_expanders_test.go +++ b/ec/ecresource/deploymentresource/integrations_server_expanders_test.go @@ -21,10 +21,11 @@ import ( "errors" "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_expandIntegrationsServerResources(t *testing.T) { diff --git a/ec/ecresource/deploymentresource/integrations_server_flatteners_test.go b/ec/ecresource/deploymentresource/integrations_server_flatteners_test.go index 9ae0ec20c..3791ae26d 100644 --- a/ec/ecresource/deploymentresource/integrations_server_flatteners_test.go +++ b/ec/ecresource/deploymentresource/integrations_server_flatteners_test.go @@ -20,10 +20,11 @@ package deploymentresource import ( "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_flattenIntegrationsServerResource(t *testing.T) { diff --git a/ec/ecresource/deploymentresource/kibana_expanders_test.go b/ec/ecresource/deploymentresource/kibana_expanders_test.go index 66115f3fc..02a0a8f17 100644 --- a/ec/ecresource/deploymentresource/kibana_expanders_test.go +++ b/ec/ecresource/deploymentresource/kibana_expanders_test.go @@ -21,10 +21,11 @@ import ( "errors" "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_expandKibanaResources(t *testing.T) { diff --git a/ec/ecresource/deploymentresource/kibana_flatteners_test.go 
b/ec/ecresource/deploymentresource/kibana_flatteners_test.go index 48b1a105a..0cd409805 100644 --- a/ec/ecresource/deploymentresource/kibana_flatteners_test.go +++ b/ec/ecresource/deploymentresource/kibana_flatteners_test.go @@ -20,10 +20,11 @@ package deploymentresource import ( "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_flattenKibanaResources(t *testing.T) { diff --git a/ec/ecresource/deploymentresource/observability_test.go b/ec/ecresource/deploymentresource/observability_test.go index 96ab053fb..f7119f4ab 100644 --- a/ec/ecresource/deploymentresource/observability_test.go +++ b/ec/ecresource/deploymentresource/observability_test.go @@ -20,11 +20,12 @@ package deploymentresource import ( "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func TestFlattenObservability(t *testing.T) { diff --git a/ec/ecresource/deploymentresource/read.go b/ec/ecresource/deploymentresource/read.go index 088645d88..8b0145df3 100644 --- a/ec/ecresource/deploymentresource/read.go +++ b/ec/ecresource/deploymentresource/read.go @@ -21,6 +21,9 @@ import ( "context" "errors" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/apierror" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" @@ -29,8 +32,6 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/client/deployments" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // Read queries the remote deployment state and updates the local state. diff --git a/ec/ecresource/deploymentresource/read_test.go b/ec/ecresource/deploymentresource/read_test.go index afdf2f1b4..b54ff5523 100644 --- a/ec/ecresource/deploymentresource/read_test.go +++ b/ec/ecresource/deploymentresource/read_test.go @@ -21,16 +21,18 @@ import ( "context" "testing" + "github.com/go-openapi/runtime" + "github.com/stretchr/testify/assert" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/apierror" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/client/deployments" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/go-openapi/runtime" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/deploymentresource/schema_elasticsearch.go b/ec/ecresource/deploymentresource/schema_elasticsearch.go index 0a460fd45..5bd75ff94 100644 --- a/ec/ecresource/deploymentresource/schema_elasticsearch.go +++ b/ec/ecresource/deploymentresource/schema_elasticsearch.go @@ -23,9 +23,10 @@ import ( "strconv" "strings" - "github.com/elastic/cloud-sdk-go/pkg/util/slice" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/elastic/cloud-sdk-go/pkg/util/slice" ) func newElasticsearchResource() *schema.Resource { diff --git a/ec/ecresource/deploymentresource/stopped_resource_test.go b/ec/ecresource/deploymentresource/stopped_resource_test.go index 6b987a49f..bd2bb0ae3 100644 --- 
a/ec/ecresource/deploymentresource/stopped_resource_test.go +++ b/ec/ecresource/deploymentresource/stopped_resource_test.go @@ -20,9 +20,10 @@ package deploymentresource import ( "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_isApmResourceStopped(t *testing.T) { diff --git a/ec/ecresource/deploymentresource/testutil_func.go b/ec/ecresource/deploymentresource/testutil_func.go index c81799e72..bcaae5251 100644 --- a/ec/ecresource/deploymentresource/testutil_func.go +++ b/ec/ecresource/deploymentresource/testutil_func.go @@ -23,8 +23,9 @@ import ( "os" "testing" - "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/elastic/cloud-sdk-go/pkg/models" ) // parseDeploymentTemplate is a test helper which parse a file by path and diff --git a/ec/ecresource/deploymentresource/testutil_func_test.go b/ec/ecresource/deploymentresource/testutil_func_test.go index 1d07eff67..e9210f5ba 100644 --- a/ec/ecresource/deploymentresource/testutil_func_test.go +++ b/ec/ecresource/deploymentresource/testutil_func_test.go @@ -21,9 +21,10 @@ import ( "os" "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_parseDeploymentTemplate(t *testing.T) { diff --git a/ec/ecresource/deploymentresource/traffic_filter.go b/ec/ecresource/deploymentresource/traffic_filter.go index 835bab808..a2c1f0c58 100644 --- a/ec/ecresource/deploymentresource/traffic_filter.go +++ b/ec/ecresource/deploymentresource/traffic_filter.go @@ -18,9 +18,10 @@ package deploymentresource import ( - "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/models" + 
"github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/deploymentresource/traffic_filter_test.go b/ec/ecresource/deploymentresource/traffic_filter_test.go index 36ace0b6a..506cc762e 100644 --- a/ec/ecresource/deploymentresource/traffic_filter_test.go +++ b/ec/ecresource/deploymentresource/traffic_filter_test.go @@ -20,9 +20,10 @@ package deploymentresource import ( "testing" - "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/models" ) func TestParseTrafficFiltering(t *testing.T) { diff --git a/ec/ecresource/deploymentresource/update.go b/ec/ecresource/deploymentresource/update.go index 26f33ab53..9fae24ab6 100644 --- a/ec/ecresource/deploymentresource/update.go +++ b/ec/ecresource/deploymentresource/update.go @@ -21,11 +21,12 @@ import ( "context" "strings" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // Update syncs the remote state with the local. 
diff --git a/ec/ecresource/deploymentresource/update_test.go b/ec/ecresource/deploymentresource/update_test.go index e0d6c9d4a..92d205cdf 100644 --- a/ec/ecresource/deploymentresource/update_test.go +++ b/ec/ecresource/deploymentresource/update_test.go @@ -20,10 +20,11 @@ package deploymentresource import ( "testing" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/deploymentresource/update_traffic_rules.go b/ec/ecresource/deploymentresource/update_traffic_rules.go index 3bf66d60b..f39d47c83 100644 --- a/ec/ecresource/deploymentresource/update_traffic_rules.go +++ b/ec/ecresource/deploymentresource/update_traffic_rules.go @@ -18,9 +18,10 @@ package deploymentresource import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/elasticsearchkeystoreresource/create.go b/ec/ecresource/elasticsearchkeystoreresource/create.go index bade94c14..0ac5dc644 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/create.go +++ b/ec/ecresource/elasticsearchkeystoreresource/create.go @@ -22,10 +22,11 @@ import ( "strconv" "strings" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/eskeystoreapi" ) diff --git a/ec/ecresource/elasticsearchkeystoreresource/delete.go b/ec/ecresource/elasticsearchkeystoreresource/delete.go index b6c6d7e63..ae951e4bc 100644 --- 
a/ec/ecresource/elasticsearchkeystoreresource/delete.go +++ b/ec/ecresource/elasticsearchkeystoreresource/delete.go @@ -20,10 +20,11 @@ package elasticsearchkeystoreresource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/eskeystoreapi" ) diff --git a/ec/ecresource/elasticsearchkeystoreresource/expanders.go b/ec/ecresource/elasticsearchkeystoreresource/expanders.go index 9ed695f8b..74dd423e2 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/expanders.go +++ b/ec/ecresource/elasticsearchkeystoreresource/expanders.go @@ -20,9 +20,10 @@ package elasticsearchkeystoreresource import ( "encoding/json" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func expandModel(d *schema.ResourceData) *models.KeystoreContents { diff --git a/ec/ecresource/elasticsearchkeystoreresource/expanders_test.go b/ec/ecresource/elasticsearchkeystoreresource/expanders_test.go index a1ebe37f8..6cc4a12cb 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/expanders_test.go +++ b/ec/ecresource/elasticsearchkeystoreresource/expanders_test.go @@ -20,11 +20,12 @@ package elasticsearchkeystoreresource import ( "testing" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" ) func Test_expandModel(t *testing.T) { diff --git a/ec/ecresource/elasticsearchkeystoreresource/read.go 
b/ec/ecresource/elasticsearchkeystoreresource/read.go index beec83b44..a6710b384 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/read.go +++ b/ec/ecresource/elasticsearchkeystoreresource/read.go @@ -20,11 +20,12 @@ package elasticsearchkeystoreresource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/eskeystoreapi" ) diff --git a/ec/ecresource/elasticsearchkeystoreresource/read_test.go b/ec/ecresource/elasticsearchkeystoreresource/read_test.go index e8b568d9e..2791e43ca 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/read_test.go +++ b/ec/ecresource/elasticsearchkeystoreresource/read_test.go @@ -20,11 +20,12 @@ package elasticsearchkeystoreresource import ( "testing" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" ) func Test_modelToState(t *testing.T) { diff --git a/ec/ecresource/elasticsearchkeystoreresource/update.go b/ec/ecresource/elasticsearchkeystoreresource/update.go index 2cc190a5e..46873b645 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/update.go +++ b/ec/ecresource/elasticsearchkeystoreresource/update.go @@ -20,10 +20,11 @@ package elasticsearchkeystoreresource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" + 
"github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/eskeystoreapi" ) diff --git a/ec/ecresource/extensionresource/create.go b/ec/ecresource/extensionresource/create.go index cd5421e6c..159a47062 100644 --- a/ec/ecresource/extensionresource/create.go +++ b/ec/ecresource/extensionresource/create.go @@ -20,12 +20,13 @@ package extensionresource import ( "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/extensionapi" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // createResource will create a new deployment extension diff --git a/ec/ecresource/extensionresource/create_test.go b/ec/ecresource/extensionresource/create_test.go index 2c33b134f..78e527871 100644 --- a/ec/ecresource/extensionresource/create_test.go +++ b/ec/ecresource/extensionresource/create_test.go @@ -21,11 +21,13 @@ import ( "context" "testing" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/stretchr/testify/assert" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/extensionresource/delete.go b/ec/ecresource/extensionresource/delete.go index d8ab87ba1..07152fa9c 100644 --- a/ec/ecresource/extensionresource/delete.go +++ b/ec/ecresource/extensionresource/delete.go @@ -21,11 +21,12 @@ import ( "context" "errors" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/extensionapi" "github.com/elastic/cloud-sdk-go/pkg/client/extensions" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func deleteResource(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { diff --git a/ec/ecresource/extensionresource/delete_test.go b/ec/ecresource/extensionresource/delete_test.go index b8f2af718..3e5c615b4 100644 --- a/ec/ecresource/extensionresource/delete_test.go +++ b/ec/ecresource/extensionresource/delete_test.go @@ -21,11 +21,13 @@ import ( "context" "testing" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/stretchr/testify/assert" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/extensionresource/read.go b/ec/ecresource/extensionresource/read.go index 146d5c6bb..05624fc4f 100644 --- a/ec/ecresource/extensionresource/read.go +++ b/ec/ecresource/extensionresource/read.go @@ -21,13 +21,14 @@ import ( "context" "errors" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/extensionapi" "github.com/elastic/cloud-sdk-go/pkg/client/extensions" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func readResource(_ context.Context, d 
*schema.ResourceData, meta interface{}) diag.Diagnostics { diff --git a/ec/ecresource/extensionresource/read_test.go b/ec/ecresource/extensionresource/read_test.go index ae45c80a7..827518986 100644 --- a/ec/ecresource/extensionresource/read_test.go +++ b/ec/ecresource/extensionresource/read_test.go @@ -22,15 +22,15 @@ import ( "testing" "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/extensionresource/update.go b/ec/ecresource/extensionresource/update.go index cc2161f15..613f0a780 100644 --- a/ec/ecresource/extensionresource/update.go +++ b/ec/ecresource/extensionresource/update.go @@ -20,12 +20,13 @@ package extensionresource import ( "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/extensionapi" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func updateResource(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { diff --git a/ec/ecresource/extensionresource/update_test.go b/ec/ecresource/extensionresource/update_test.go index 0970a0322..6c42b610f 100644 --- 
a/ec/ecresource/extensionresource/update_test.go +++ b/ec/ecresource/extensionresource/update_test.go @@ -21,14 +21,16 @@ import ( "context" "testing" + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/go-openapi/strfmt" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/extensionresource/upload.go b/ec/ecresource/extensionresource/upload.go index 18fc7ebbb..d05851575 100644 --- a/ec/ecresource/extensionresource/upload.go +++ b/ec/ecresource/extensionresource/upload.go @@ -20,10 +20,11 @@ package extensionresource import ( "os" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/extensionapi" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func uploadExtension(client *api.API, d *schema.ResourceData) error { diff --git a/ec/ecresource/trafficfilterassocresource/create.go b/ec/ecresource/trafficfilterassocresource/create.go index 7c2946995..3046ab256 100644 --- a/ec/ecresource/trafficfilterassocresource/create.go +++ b/ec/ecresource/trafficfilterassocresource/create.go @@ -20,12 +20,24 @@ package trafficfilterassocresource import ( "context" "fmt" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" + "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/types" + + 
"github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" ) -func (t trafficFilterAssocResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { +func (r Resource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + // Prevent panic if the provider has not been configured. + if r.client == nil { + response.Diagnostics.AddError( + "Unconfigured API Client", + "Expected configured API client. Please report this issue to the provider developers.", + ) + + return + } + var newState modelV0 diags := request.Plan.Get(ctx, &newState) @@ -35,7 +47,7 @@ func (t trafficFilterAssocResource) Create(ctx context.Context, request resource } if err := trafficfilterapi.CreateAssociation(trafficfilterapi.CreateAssociationParams{ - API: t.provider.GetClient(), + API: r.client, ID: newState.TrafficFilterID.Value, EntityID: newState.DeploymentID.Value, EntityType: entityTypeDeployment, diff --git a/ec/ecresource/trafficfilterassocresource/delete.go b/ec/ecresource/trafficfilterassocresource/delete.go index 8d4103b55..0f2b2233d 100644 --- a/ec/ecresource/trafficfilterassocresource/delete.go +++ b/ec/ecresource/trafficfilterassocresource/delete.go @@ -20,12 +20,24 @@ package trafficfilterassocresource import ( "context" "errors" + + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/elastic/cloud-sdk-go/pkg/client/deployments_traffic_filter" - "github.com/hashicorp/terraform-plugin-framework/resource" ) -func (t trafficFilterAssocResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { +func (r Resource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + // Prevent panic if the provider has not been configured. 
+ if r.client == nil { + response.Diagnostics.AddError( + "Unconfigured API Client", + "Expected configured API client. Please report this issue to the provider developers.", + ) + + return + } + var state modelV0 diags := request.State.Get(ctx, &state) @@ -35,7 +47,7 @@ func (t trafficFilterAssocResource) Delete(ctx context.Context, request resource } if err := trafficfilterapi.DeleteAssociation(trafficfilterapi.DeleteAssociationParams{ - API: t.provider.GetClient(), + API: r.client, ID: state.TrafficFilterID.Value, EntityID: state.DeploymentID.Value, EntityType: entityTypeDeployment, diff --git a/ec/ecresource/trafficfilterassocresource/import_state.go b/ec/ecresource/trafficfilterassocresource/import_state.go index 22d8c3e30..1c1c3cb1d 100644 --- a/ec/ecresource/trafficfilterassocresource/import_state.go +++ b/ec/ecresource/trafficfilterassocresource/import_state.go @@ -20,12 +20,13 @@ package trafficfilterassocresource import ( "context" "fmt" + "strings" + "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" - "strings" ) -func (t trafficFilterAssocResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { +func (r Resource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { idParts := strings.Split(request.ID, ",") if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { diff --git a/ec/ecresource/trafficfilterassocresource/read.go b/ec/ecresource/trafficfilterassocresource/read.go index 31a901b1e..633f34be3 100644 --- a/ec/ecresource/trafficfilterassocresource/read.go +++ b/ec/ecresource/trafficfilterassocresource/read.go @@ -19,12 +19,25 @@ package trafficfilterassocresource import ( "context" + + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" + 
"github.com/elastic/terraform-provider-ec/ec/internal/util" - "github.com/hashicorp/terraform-plugin-framework/resource" ) -func (t trafficFilterAssocResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { +func (r Resource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + // Prevent panic if the provider has not been configured. + if r.client == nil { + response.Diagnostics.AddError( + "Unconfigured API Client", + "Expected configured API client. Please report this issue to the provider developers.", + ) + + return + } + var state modelV0 diags := request.State.Get(ctx, &state) @@ -34,7 +47,7 @@ func (t trafficFilterAssocResource) Read(ctx context.Context, request resource.R } res, err := trafficfilterapi.Get(trafficfilterapi.GetParams{ - API: t.provider.GetClient(), + API: r.client, ID: state.TrafficFilterID.Value, IncludeAssociations: true, }) diff --git a/ec/ecresource/trafficfilterassocresource/resource_test.go b/ec/ecresource/trafficfilterassocresource/resource_test.go index 8b64425d3..ca254494e 100644 --- a/ec/ecresource/trafficfilterassocresource/resource_test.go +++ b/ec/ecresource/trafficfilterassocresource/resource_test.go @@ -18,16 +18,18 @@ package trafficfilterassocresource_test import ( - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/terraform-provider-ec/ec" - "github.com/hashicorp/terraform-plugin-framework/providerserver" - "github.com/hashicorp/terraform-plugin-go/tfprotov5" "net/url" "regexp" "testing" + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" r "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + + "github.com/elastic/terraform-provider-ec/ec" ) func TestResourceTrafficFilterAssoc(t *testing.T) { diff --git 
a/ec/ecresource/trafficfilterassocresource/schema.go b/ec/ecresource/trafficfilterassocresource/schema.go index bd0460d84..84f1e3f7e 100644 --- a/ec/ecresource/trafficfilterassocresource/schema.go +++ b/ec/ecresource/trafficfilterassocresource/schema.go @@ -19,25 +19,29 @@ package trafficfilterassocresource import ( "context" - "github.com/elastic/terraform-provider-ec/ec/internal" + "github.com/hashicorp/terraform-plugin-framework/diag" - tpfprovider "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/cloud-sdk-go/pkg/api" + + "github.com/elastic/terraform-provider-ec/ec/internal" ) // Ensure provider defined types fully satisfy framework interfaces -var _ tpfprovider.ResourceType = ResourceType{} -var _ resource.Resource = trafficFilterAssocResource{} - -var _ resource.ResourceWithImportState = trafficFilterAssocResource{} +var _ resource.Resource = &Resource{} +var _ resource.ResourceWithConfigure = &Resource{} +var _ resource.ResourceWithGetSchema = &Resource{} +var _ resource.ResourceWithImportState = &Resource{} +var _ resource.ResourceWithMetadata = &Resource{} type ResourceType struct{} const entityTypeDeployment = "deployment" -func (t ResourceType) GetSchema(_ context.Context) (tfsdk.Schema, diag.Diagnostics) { +func (r *Resource) GetSchema(_ context.Context) (tfsdk.Schema, diag.Diagnostics) { return tfsdk.Schema{ Attributes: map[string]tfsdk.Attribute{ "deployment_id": { @@ -66,16 +70,18 @@ func (t ResourceType) GetSchema(_ context.Context) (tfsdk.Schema, diag.Diagnosti }, nil } -func (t ResourceType) NewResource(_ context.Context, provider tpfprovider.Provider) (resource.Resource, diag.Diagnostics) { - p, diags := internal.ConvertProviderType(provider) +type Resource struct { + client *api.API +} - return &trafficFilterAssocResource{ - provider: p, - }, 
diags +func (r *Resource) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) { + client, diags := internal.ConvertProviderData(request.ProviderData) + response.Diagnostics.Append(diags...) + r.client = client } -type trafficFilterAssocResource struct { - provider internal.Provider +func (r *Resource) Metadata(ctx context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = request.ProviderTypeName + "_deployment_traffic_filter_association" } type modelV0 struct { diff --git a/ec/ecresource/trafficfilterassocresource/update.go b/ec/ecresource/trafficfilterassocresource/update.go index b5a2017c5..e99403fcf 100644 --- a/ec/ecresource/trafficfilterassocresource/update.go +++ b/ec/ecresource/trafficfilterassocresource/update.go @@ -19,9 +19,10 @@ package trafficfilterassocresource import ( "context" + "github.com/hashicorp/terraform-plugin-framework/resource" ) -func (t trafficFilterAssocResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { +func (r Resource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { panic("ec_deployment_traffic_filter_association resources can not be updated!") } diff --git a/ec/ecresource/trafficfilterresource/create.go b/ec/ecresource/trafficfilterresource/create.go index 6beed518d..0e535b7bc 100644 --- a/ec/ecresource/trafficfilterresource/create.go +++ b/ec/ecresource/trafficfilterresource/create.go @@ -20,10 +20,11 @@ package trafficfilterresource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" ) // Create will create a 
new deployment traffic filter ruleset diff --git a/ec/ecresource/trafficfilterresource/delete.go b/ec/ecresource/trafficfilterresource/delete.go index cb675f052..5723eace8 100644 --- a/ec/ecresource/trafficfilterresource/delete.go +++ b/ec/ecresource/trafficfilterresource/delete.go @@ -21,11 +21,12 @@ import ( "context" "errors" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/elastic/cloud-sdk-go/pkg/client/deployments_traffic_filter" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/trafficfilterresource/delete_test.go b/ec/ecresource/trafficfilterresource/delete_test.go index c03db81eb..451835014 100644 --- a/ec/ecresource/trafficfilterresource/delete_test.go +++ b/ec/ecresource/trafficfilterresource/delete_test.go @@ -21,14 +21,17 @@ import ( "context" "testing" + "github.com/stretchr/testify/assert" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/internal/util" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" ) func Test_delete(t *testing.T) { diff --git a/ec/ecresource/trafficfilterresource/expanders.go b/ec/ecresource/trafficfilterresource/expanders.go index 9e7d7eb8d..2cb57a10e 100644 --- a/ec/ecresource/trafficfilterresource/expanders.go +++ b/ec/ecresource/trafficfilterresource/expanders.go @@ -18,9 +18,10 @@ 
package trafficfilterresource import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func expandModel(d *schema.ResourceData) *models.TrafficFilterRulesetRequest { diff --git a/ec/ecresource/trafficfilterresource/expanders_test.go b/ec/ecresource/trafficfilterresource/expanders_test.go index 5189ed213..9fb1746c7 100644 --- a/ec/ecresource/trafficfilterresource/expanders_test.go +++ b/ec/ecresource/trafficfilterresource/expanders_test.go @@ -20,11 +20,12 @@ package trafficfilterresource import ( "testing" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/trafficfilterresource/flatteners.go b/ec/ecresource/trafficfilterresource/flatteners.go index 5f2d4615d..2950da74b 100644 --- a/ec/ecresource/trafficfilterresource/flatteners.go +++ b/ec/ecresource/trafficfilterresource/flatteners.go @@ -18,8 +18,9 @@ package trafficfilterresource import ( - "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/elastic/cloud-sdk-go/pkg/models" ) func modelToState(d *schema.ResourceData, res *models.TrafficFilterRulesetInfo) error { diff --git a/ec/ecresource/trafficfilterresource/flatteners_test.go b/ec/ecresource/trafficfilterresource/flatteners_test.go index c660cdd56..a85e52f4f 100644 --- a/ec/ecresource/trafficfilterresource/flatteners_test.go +++ b/ec/ecresource/trafficfilterresource/flatteners_test.go @@ -20,11 +20,12 @@ package trafficfilterresource import ( "testing" - "github.com/elastic/cloud-sdk-go/pkg/models" 
- "github.com/elastic/cloud-sdk-go/pkg/util/ec" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/trafficfilterresource/read.go b/ec/ecresource/trafficfilterresource/read.go index f249a4f52..1570508b9 100644 --- a/ec/ecresource/trafficfilterresource/read.go +++ b/ec/ecresource/trafficfilterresource/read.go @@ -20,11 +20,12 @@ package trafficfilterresource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/trafficfilterresource/read_test.go b/ec/ecresource/trafficfilterresource/read_test.go index 6b563ade2..ec765c9a5 100644 --- a/ec/ecresource/trafficfilterresource/read_test.go +++ b/ec/ecresource/trafficfilterresource/read_test.go @@ -21,11 +21,13 @@ import ( "context" "testing" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/stretchr/testify/assert" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/ec/ecresource/trafficfilterresource/update.go b/ec/ecresource/trafficfilterresource/update.go index 21cb4b3b6..c5b3d87cf 100644 --- a/ec/ecresource/trafficfilterresource/update.go +++ 
b/ec/ecresource/trafficfilterresource/update.go @@ -20,10 +20,11 @@ package trafficfilterresource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" ) // Update will update an existing deployment traffic filter ruleset diff --git a/ec/internal/flatteners/flatten_tags.go b/ec/internal/flatteners/flatten_tags.go index 57d05c01e..bc455ad8c 100644 --- a/ec/internal/flatteners/flatten_tags.go +++ b/ec/internal/flatteners/flatten_tags.go @@ -18,9 +18,10 @@ package flatteners import ( - "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/cloud-sdk-go/pkg/models" ) // flattenTags takes in Deployment Metadata resource models and returns its diff --git a/ec/internal/flatteners/flatten_tags_test.go b/ec/internal/flatteners/flatten_tags_test.go index 9b28f6f85..6775d6b04 100644 --- a/ec/internal/flatteners/flatten_tags_test.go +++ b/ec/internal/flatteners/flatten_tags_test.go @@ -21,9 +21,10 @@ import ( "context" "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func TestFlattenTags(t *testing.T) { diff --git a/ec/internal/planmodifier/default_from_env.go b/ec/internal/planmodifier/default_from_env.go index 77b2fd3cc..8a3dcd526 100644 --- a/ec/internal/planmodifier/default_from_env.go +++ b/ec/internal/planmodifier/default_from_env.go @@ -20,10 +20,11 @@ package planmodifier import ( "context" "fmt" - "github.com/elastic/terraform-provider-ec/ec/internal/util" - 
"github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) // defaultFromEnvAttributePlanModifier specifies a default value (attr.Value) for an attribute. diff --git a/ec/internal/provider.go b/ec/internal/provider.go index ce0e3f6aa..1345c510e 100644 --- a/ec/internal/provider.go +++ b/ec/internal/provider.go @@ -20,42 +20,27 @@ package internal import ( "fmt" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/provider" -) -// Provider is an interface required to avoid import cycles in datasource / resource packages -type Provider interface { - provider.Provider - GetClient() *api.API -} + "github.com/elastic/cloud-sdk-go/pkg/api" +) -// ConvertProviderType is a helper function for NewResource and NewDataSource -// implementations to associate the concrete provider type. Alternatively, -// this helper can be skipped and the provider type can be directly type -// asserted (e.g. provider: in.(*provider)), however using this can prevent -// potential panics. -func ConvertProviderType(in provider.Provider) (Provider, diag.Diagnostics) { +// ConvertProviderData is a helper function for DataSource.Configure and Resource.Configure implementations +func ConvertProviderData(providerData any) (*api.API, diag.Diagnostics) { var diags diag.Diagnostics - p, ok := in.(Provider) + if providerData == nil { + return nil, diags + } + client, ok := providerData.(*api.API) if !ok { diags.AddError( - "Unexpected Provider Instance Type", - fmt.Sprintf("While creating the data source or resource, an unexpected provider type (%T) was received. This is always a bug in the provider code and should be reported to the provider developers.", p), + "Unexpected Provider Data", + fmt.Sprintf("Expected *api.API, got: %T. 
Please report this issue to the provider developers.", providerData), ) - return p, diags - } - if p == nil { - diags.AddError( - "Unexpected Provider Instance Type", - "While creating the data source or resource, an unexpected empty provider instance was received. This is always a bug in the provider code and should be reported to the provider developers.", - ) - return p, diags + return nil, diags } - - return p, diags + return client, diags } diff --git a/ec/internal/util/helpers.go b/ec/internal/util/helpers.go index d00952da5..4abc5b5c7 100644 --- a/ec/internal/util/helpers.go +++ b/ec/internal/util/helpers.go @@ -19,11 +19,12 @@ package util import ( "fmt" + "os" + "strconv" + "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "os" - "strconv" "github.com/elastic/cloud-sdk-go/pkg/models" ) diff --git a/ec/internal/util/helpers_test.go b/ec/internal/util/helpers_test.go index d07d5f897..bee83a1e5 100644 --- a/ec/internal/util/helpers_test.go +++ b/ec/internal/util/helpers_test.go @@ -20,9 +20,10 @@ package util import ( "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func TestFlattenClusterEndpoint(t *testing.T) { diff --git a/ec/internal/util/parsers_test.go b/ec/internal/util/parsers_test.go index 317b50755..2f8f3c1a5 100644 --- a/ec/internal/util/parsers_test.go +++ b/ec/internal/util/parsers_test.go @@ -21,9 +21,10 @@ import ( "errors" "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func TestMemoryToState(t *testing.T) { diff --git a/ec/internal/util/testutils.go b/ec/internal/util/testutils.go index b55318c56..ee5e5491f 100644 --- a/ec/internal/util/testutils.go +++ 
b/ec/internal/util/testutils.go @@ -22,9 +22,10 @@ import ( "errors" "testing" - "github.com/elastic/cloud-sdk-go/pkg/multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/elastic/cloud-sdk-go/pkg/multierror" ) // ResDataParams holds the raw configuration for NewResourceData to consume diff --git a/ec/internal/util/traffic_filter_err_test.go b/ec/internal/util/traffic_filter_err_test.go index 5da4e4f5a..5ac31a5b4 100644 --- a/ec/internal/util/traffic_filter_err_test.go +++ b/ec/internal/util/traffic_filter_err_test.go @@ -20,9 +20,10 @@ package util import ( "testing" + "github.com/go-openapi/runtime" + "github.com/elastic/cloud-sdk-go/pkg/api/apierror" "github.com/elastic/cloud-sdk-go/pkg/client/deployments_traffic_filter" - "github.com/go-openapi/runtime" ) func TestTrafficFilterNotFound(t *testing.T) { diff --git a/ec/internal/validators/knownvalidator.go b/ec/internal/validators/knownvalidator.go index 129c93506..8ef7bf0c0 100644 --- a/ec/internal/validators/knownvalidator.go +++ b/ec/internal/validators/knownvalidator.go @@ -19,6 +19,7 @@ package validators import ( "context" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" ) diff --git a/ec/provider.go b/ec/provider.go index c613f2c9d..afffa4176 100644 --- a/ec/provider.go +++ b/ec/provider.go @@ -22,6 +22,18 @@ import ( "fmt" "time" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/elastic/cloud-sdk-go/pkg/api" + 
"github.com/elastic/terraform-provider-ec/ec/ecdatasource/deploymentdatasource" "github.com/elastic/terraform-provider-ec/ec/ecdatasource/deploymentsdatasource" "github.com/elastic/terraform-provider-ec/ec/ecdatasource/stackdatasource" @@ -30,18 +42,9 @@ import ( "github.com/elastic/terraform-provider-ec/ec/ecresource/extensionresource" "github.com/elastic/terraform-provider-ec/ec/ecresource/trafficfilterassocresource" "github.com/elastic/terraform-provider-ec/ec/ecresource/trafficfilterresource" - "github.com/elastic/terraform-provider-ec/ec/internal" "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" "github.com/elastic/terraform-provider-ec/ec/internal/util" "github.com/elastic/terraform-provider-ec/ec/internal/validators" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/provider" - "github.com/hashicorp/terraform-plugin-framework/tfsdk" - "github.com/hashicorp/terraform-plugin-framework/types" ) const ( @@ -146,17 +149,34 @@ func ProviderWithClient(client *api.API, version string) provider.Provider { return &Provider{client: client, version: version} } -var _ internal.Provider = (*Provider)(nil) - -func (p *Provider) GetClient() *api.API { - return p.client -} +var _ provider.Provider = (*Provider)(nil) +var _ provider.ProviderWithMetadata = (*Provider)(nil) +var _ provider.ProviderWithDataSources = (*Provider)(nil) +var _ provider.ProviderWithResources = (*Provider)(nil) type Provider struct { version string client *api.API } +func (p *Provider) Metadata(ctx context.Context, request provider.MetadataRequest, response *provider.MetadataResponse) { + response.TypeName = "ec" +} + +func (p *Provider) DataSources(ctx context.Context) []func() datasource.DataSource { + return []func() datasource.DataSource{ + func() 
datasource.DataSource { return &deploymentdatasource.DataSource{} }, + func() datasource.DataSource { return &deploymentsdatasource.DataSource{} }, + func() datasource.DataSource { return &stackdatasource.DataSource{} }, + } +} + +func (p *Provider) Resources(ctx context.Context) []func() resource.Resource { + return []func() resource.Resource{ + func() resource.Resource { return &trafficfilterassocresource.Resource{} }, + } +} + func (p *Provider) GetSchema(context.Context) (tfsdk.Schema, diag.Diagnostics) { var diags diag.Diagnostics @@ -229,6 +249,9 @@ type providerData struct { func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, res *provider.ConfigureResponse) { if p.client != nil { + // Required for unit tests, because a mock client is pre-created there. + res.DataSourceData = p.client + res.ResourceData = p.client return } @@ -345,7 +368,7 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, return } - p.client, err = api.NewAPI(cfg) + client, err := api.NewAPI(cfg) if err != nil { res.Diagnostics.AddWarning( "Unable to create api Client config", @@ -353,18 +376,8 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, ) return } -} - -func (p *Provider) GetResources(_ context.Context) (map[string]provider.ResourceType, diag.Diagnostics) { - return map[string]provider.ResourceType{ - "ec_deployment_traffic_filter_association": trafficfilterassocresource.ResourceType{}, - }, nil -} -func (p *Provider) GetDataSources(_ context.Context) (map[string]provider.DataSourceType, diag.Diagnostics) { - return map[string]provider.DataSourceType{ - "ec_stack": stackdatasource.DataSourceType{}, - "ec_deployment": deploymentdatasource.DataSourceType{}, - "ec_deployments": deploymentsdatasource.DataSourceType{}, - }, nil + p.client = client + res.DataSourceData = client + res.ResourceData = client } diff --git a/ec/provider_config.go b/ec/provider_config.go index 6e8969fd5..1be54bfab 
100644 --- a/ec/provider_config.go +++ b/ec/provider_config.go @@ -24,12 +24,13 @@ import ( "os" "time" - "github.com/elastic/terraform-provider-ec/ec/internal/util" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/auth" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) const ( diff --git a/ec/provider_config_test.go b/ec/provider_config_test.go index 2419f828b..5a0dffa94 100644 --- a/ec/provider_config_test.go +++ b/ec/provider_config_test.go @@ -26,11 +26,12 @@ import ( "syscall" "testing" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/auth" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) diff --git a/gen/gen.go b/gen/gen.go index 68f6ae362..b533af753 100644 --- a/gen/gen.go +++ b/gen/gen.go @@ -17,6 +17,7 @@ // This program generates ec/version.go. 
It can be invoked by running // make generate +//go:build ignore // +build ignore package main diff --git a/go.mod b/go.mod index 2eb44e9e5..3097048ac 100644 --- a/go.mod +++ b/go.mod @@ -8,13 +8,13 @@ require ( github.com/elastic/cloud-sdk-go v1.10.0 github.com/go-openapi/runtime v0.24.1 github.com/go-openapi/strfmt v0.21.3 - github.com/hashicorp/terraform-plugin-framework v0.11.1 + github.com/hashicorp/terraform-plugin-framework v0.12.0 github.com/hashicorp/terraform-plugin-go v0.14.0 github.com/hashicorp/terraform-plugin-log v0.7.0 github.com/hashicorp/terraform-plugin-mux v0.7.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.21.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.22.0 github.com/stretchr/testify v1.8.0 - golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 + golang.org/x/exp v0.0.0-20220914170420-dc92f8653013 ) require ( @@ -33,7 +33,7 @@ require ( github.com/go-openapi/swag v0.22.3 // indirect github.com/go-openapi/validate v0.22.0 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/google/go-cmp v0.5.8 // indirect + github.com/google/go-cmp v0.5.9 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -44,9 +44,9 @@ require ( github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hc-install v0.4.0 // indirect - github.com/hashicorp/hcl/v2 v2.13.0 // indirect + github.com/hashicorp/hcl/v2 v2.14.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-exec v0.17.2 // indirect + github.com/hashicorp/terraform-exec v0.17.3 // indirect github.com/hashicorp/terraform-json v0.14.0 // indirect github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c // indirect github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect @@ -68,13 +68,13 @@ require ( 
github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/zclconf/go-cty v1.11.0 // indirect - go.mongodb.org/mongo-driver v1.10.1 // indirect - golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect - golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b // indirect - golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 // indirect + go.mongodb.org/mongo-driver v1.10.2 // indirect + golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect + golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect + golang.org/x/sys v0.0.0-20220913175220-63ea55921009 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf // indirect + google.golang.org/genproto v0.0.0-20220914210030-581e60b4ef85 // indirect google.golang.org/grpc v1.49.0 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index c4c8cf567..005fd4fa8 100644 --- a/go.sum +++ b/go.sum @@ -18,6 +18,7 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYU github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= +github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= @@ -220,6 +221,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ 
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= @@ -251,14 +254,20 @@ github.com/hashicorp/hc-install v0.4.0 h1:cZkRFr1WVa0Ty6x5fTvL1TuO1flul231rWkGH9 github.com/hashicorp/hc-install v0.4.0/go.mod h1:5d155H8EC5ewegao9A4PUTMNPZaq+TbOzkJJZ4vrXeI= github.com/hashicorp/hcl/v2 v2.13.0 h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc= github.com/hashicorp/hcl/v2 v2.13.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= +github.com/hashicorp/hcl/v2 v2.14.0 h1:jX6+Q38Ly9zaAJlAjnFVyeNSNCKKW8D0wvyg7vij5Wc= +github.com/hashicorp/hcl/v2 v2.14.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-exec v0.17.2 h1:EU7i3Fh7vDUI9nNRdMATCEfnm9axzTnad8zszYZ73Go= github.com/hashicorp/terraform-exec v0.17.2/go.mod h1:tuIbsL2l4MlwwIZx9HPM+LOV9vVyEfBYu2GsO1uH3/8= +github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjlaclkx3eErU= +github.com/hashicorp/terraform-exec v0.17.3/go.mod h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI= github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= 
github.com/hashicorp/terraform-plugin-framework v0.11.1 h1:rq8f+TLDO4tJu+n9mMYlDrcRoIdrg0gTUvV2Jr0Ya24= github.com/hashicorp/terraform-plugin-framework v0.11.1/go.mod h1:GENReHOz6GEt8Jk3UN94vk8BdC6irEHFgN3Z9HPhPUU= +github.com/hashicorp/terraform-plugin-framework v0.12.0 h1:Bk3l5MQUaZoo5eplr+u1FomYqGS564e8Tp3rutnCfYg= +github.com/hashicorp/terraform-plugin-framework v0.12.0/go.mod h1:wcZdk4+Uef6Ng+BiBJjGAcIPlIs5bhlEV/TA1k6Xkq8= github.com/hashicorp/terraform-plugin-go v0.14.0 h1:ttnSlS8bz3ZPYbMb84DpcPhY4F5DsQtcAS7cHo8uvP4= github.com/hashicorp/terraform-plugin-go v0.14.0/go.mod h1:2nNCBeRLaenyQEi78xrGrs9hMbulveqG/zDMQSvVJTE= github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs= @@ -267,6 +276,8 @@ github.com/hashicorp/terraform-plugin-mux v0.7.0 h1:wRbSYzg+v2sn5Mdee0UKm4YTt4wJ github.com/hashicorp/terraform-plugin-mux v0.7.0/go.mod h1:Ae30Mc5lz4d1awtiCbHP0YyvgBeiQ00Q1nAq0U3lb+I= github.com/hashicorp/terraform-plugin-sdk/v2 v2.21.0 h1:eIJjFlI4k6BMso6Wq/bq56U0RukXc4JbwJJ8Oze2/tg= github.com/hashicorp/terraform-plugin-sdk/v2 v2.21.0/go.mod h1:mYPs/uchNcBq7AclQv9QUtSf9iNcfp1Ag21jqTlDf2M= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.22.0 h1:MzfNfrheTt24xbEbA4npUSbX3GYu4xjXS7czcpJFyQY= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.22.0/go.mod h1:q1XKSxXg9nDmhV0IvNZNZxe3gcTAHzMqrjs8wX1acng= github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c h1:D8aRO6+mTqHfLsK/BC3j5OAoogv1WLRWzY1AaTo3rBg= github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c/go.mod h1:Wn3Na71knbXc1G8Lh+yu/dQWWJeFQEpDeJMtWMtlmNI= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= @@ -428,6 +439,8 @@ go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCu go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mongodb.org/mongo-driver v1.10.1 
h1:NujsPveKwHaWuKUer/ceo9DzEe7HIj1SlJ6uvXZG0S4= go.mongodb.org/mongo-driver v1.10.1/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8= +go.mongodb.org/mongo-driver v1.10.2 h1:4Wk3cnqOrQCn0P92L3/mmurMxzdvWWs5J9jinAVKD+k= +go.mongodb.org/mongo-driver v1.10.2/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -444,8 +457,12 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5g+dZMEWqcz5Czj/GWYbkM= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 h1:tnebWN09GYg9OLPss1KXj8txwZc6X6uMr6VFdcGNbHw= golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20220914170420-dc92f8653013 h1:ZjglnWxEUdPyXl4o/j4T89SRCI+4X6NW6185PNLEOF4= +golang.org/x/exp v0.0.0-20220914170420-dc92f8653013/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -473,6 +490,8 @@ golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b h1:ZmngSVLe/wycRns9MKikG9OWIEjGcGAkacif7oYQaUY= golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -512,6 +531,8 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 h1:v6hYoSR9T5oet+pMXwUWkbiVqx/63mlHjefrHmxwfeY= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220913175220-63ea55921009 h1:PuvuRMeLWqsf/ZdT1UUZz0syhioyv1mzuFZsXs4fvhw= +golang.org/x/sys v0.0.0-20220913175220-63ea55921009/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= @@ -541,6 +562,8 @@ google.golang.org/appengine v1.6.7 
h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf h1:Q5xNKbTSFwkuaaGaR7CMcXEM5sy19KYdUU8iF8/iRC0= google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220914210030-581e60b4ef85 h1:lkYqfLZL9+9C+SltHOTeOHL6uueWYYkGp5NoeOZQsis= +google.golang.org/genproto v0.0.0-20220914210030-581e60b4ef85/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= From c197f28cdf5834ad1cd670ff8f3076753afd05dc Mon Sep 17 00:00:00 2001 From: Pascal Hofmann Date: Sun, 18 Sep 2022 06:36:33 +0200 Subject: [PATCH 007/104] Migrate resource ec_deployment_traffic_filter to terraform-provider-framework The behavior of the default_value plan_modifier changes, so it also applies default values when the value has been explicitly specified before. Unit test timeout increased. Switch to v6 provider in tests. 
--- build/Makefile.test | 2 +- ec/acc/acc_prereq.go | 27 +- ec/acc/datasource_deployment_basic_test.go | 2 +- ec/acc/datasource_stack_test.go | 4 +- ec/acc/datasource_tags_test.go | 2 +- ec/acc/deployment_autoscaling_test.go | 2 +- ec/acc/deployment_basic_defaults_test.go | 4 +- ec/acc/deployment_basic_tags_test.go | 2 +- ec/acc/deployment_basic_test.go | 4 +- ec/acc/deployment_ccs_test.go | 2 +- ec/acc/deployment_compute_optimized_test.go | 2 +- ec/acc/deployment_dedicated_test.go | 4 +- .../deployment_docker_image_override_test.go | 2 +- .../deployment_elasticsearch_kesytore_test.go | 2 +- ec/acc/deployment_emptyconf_test.go | 2 +- ec/acc/deployment_enterprise_search_test.go | 2 +- ec/acc/deployment_extension_basic_test.go | 2 +- .../deployment_extension_bundle_file_test.go | 2 +- ...ployment_extension_plugin_download_test.go | 2 +- .../deployment_failed_upgrade_retry_test.go | 2 +- ec/acc/deployment_hotwarm_test.go | 2 +- ec/acc/deployment_integrations_server_test.go | 2 +- ec/acc/deployment_memory_optimized_test.go | 2 +- ec/acc/deployment_observability_self_test.go | 2 +- ec/acc/deployment_observability_test.go | 2 +- ec/acc/deployment_observability_tpl_test.go | 2 +- .../deployment_post_node_role_upgrade_test.go | 2 +- ...deployment_pre_node_role_migration_test.go | 2 +- ec/acc/deployment_security_test.go | 2 +- ec/acc/deployment_snapshot_test.go | 2 +- ...loyment_traffic_filter_association_test.go | 4 +- ec/acc/deployment_traffic_filter_test.go | 54 +- .../deployment_with_extension_bundle_test.go | 2 +- .../trafficfilterassocresource/create.go | 8 +- .../trafficfilterassocresource/delete.go | 8 +- .../trafficfilterassocresource/read.go | 8 +- .../resource_test.go | 24 +- .../trafficfilterassocresource/schema.go | 14 +- ec/ecresource/trafficfilterresource/create.go | 46 +- ec/ecresource/trafficfilterresource/delete.go | 42 +- .../trafficfilterresource/delete_test.go | 218 ------ .../trafficfilterresource/expanders.go | 51 +- 
.../trafficfilterresource/expanders_test.go | 92 ++- .../trafficfilterresource/flatteners.go | 73 +- .../trafficfilterresource/flatteners_test.go | 163 ++--- ec/ecresource/trafficfilterresource/read.go | 51 +- .../trafficfilterresource/read_test.go | 116 ---- .../trafficfilterresource/resource.go | 44 -- .../trafficfilterresource/resource_test.go | 636 ++++++++++++++++++ ec/ecresource/trafficfilterresource/schema.go | 239 ++++--- .../trafficfilterresource/schema_test.go | 69 -- .../trafficfilterresource/testutils.go | 43 +- ec/ecresource/trafficfilterresource/update.go | 43 +- ec/internal/planmodifier/default_value.go | 7 - ec/provider.go | 2 +- 55 files changed, 1265 insertions(+), 885 deletions(-) delete mode 100644 ec/ecresource/trafficfilterresource/delete_test.go delete mode 100644 ec/ecresource/trafficfilterresource/read_test.go delete mode 100644 ec/ecresource/trafficfilterresource/resource.go create mode 100644 ec/ecresource/trafficfilterresource/resource_test.go delete mode 100644 ec/ecresource/trafficfilterresource/schema_test.go diff --git a/build/Makefile.test b/build/Makefile.test index 1622402c9..e1229d14f 100644 --- a/build/Makefile.test +++ b/build/Makefile.test @@ -3,7 +3,7 @@ SWEEP_DIR ?= $(TEST_ACC) SWEEP_CI_RUN_FILTER ?= ec_deployments TEST ?= ./... 
TEST_COUNT ?= 1 -TESTUNITARGS ?= -timeout 10s -p 4 -race -cover -coverprofile=reports/c.out +TESTUNITARGS ?= -timeout 30s -p 4 -race -cover -coverprofile=reports/c.out TEST_ACC ?= github.com/elastic/terraform-provider-ec/ec/acc TEST_NAME ?= TestAcc TEST_ACC_PARALLEL = 6 diff --git a/ec/acc/acc_prereq.go b/ec/acc/acc_prereq.go index 516f07de9..c0709e41f 100644 --- a/ec/acc/acc_prereq.go +++ b/ec/acc/acc_prereq.go @@ -24,8 +24,9 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-framework/providerserver" - "github.com/hashicorp/terraform-plugin-go/tfprotov5" - "github.com/hashicorp/terraform-plugin-mux/tf5muxserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-mux/tf5to6server" + "github.com/hashicorp/terraform-plugin-mux/tf6muxserver" "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/auth" @@ -37,16 +38,20 @@ const ( prefix = "terraform_acc_" ) -var testAccProviderFactory = protoV5ProviderFactories() - -func protoV5ProviderFactories() map[string]func() (tfprotov5.ProviderServer, error) { - return map[string]func() (tfprotov5.ProviderServer, error){ - "ec": func() (tfprotov5.ProviderServer, error) { - return tf5muxserver.NewMuxServer(context.Background(), - func() tfprotov5.ProviderServer { - return ec.LegacyProvider().GRPCProvider() +var testAccProviderFactory = protoV6ProviderFactories() + +func protoV6ProviderFactories() map[string]func() (tfprotov6.ProviderServer, error) { + return map[string]func() (tfprotov6.ProviderServer, error){ + "ec": func() (tfprotov6.ProviderServer, error) { + return tf6muxserver.NewMuxServer(context.Background(), + func() tfprotov6.ProviderServer { + upgradedSdkProvider, _ := tf5to6server.UpgradeServer( + context.Background(), + ec.LegacyProvider().GRPCProvider, + ) + return upgradedSdkProvider }, - providerserver.NewProtocol5(ec.New("acc-tests")), + providerserver.NewProtocol6(ec.New("acc-tests")), ) }, } diff --git 
a/ec/acc/datasource_deployment_basic_test.go b/ec/acc/datasource_deployment_basic_test.go index 556b87570..a38dc8d75 100644 --- a/ec/acc/datasource_deployment_basic_test.go +++ b/ec/acc/datasource_deployment_basic_test.go @@ -39,7 +39,7 @@ func TestAccDatasourceDeployment_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/datasource_stack_test.go b/ec/acc/datasource_stack_test.go index e09613574..6e9c0e11b 100644 --- a/ec/acc/datasource_stack_test.go +++ b/ec/acc/datasource_stack_test.go @@ -32,7 +32,7 @@ func TestAccDatasourceStack_latest(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, Steps: []resource.TestStep{ { Config: cfg, @@ -54,7 +54,7 @@ func TestAccDatasourceStack_regex(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, Steps: []resource.TestStep{ { Config: cfg, diff --git a/ec/acc/datasource_tags_test.go b/ec/acc/datasource_tags_test.go index 3a653d832..ef22f64fc 100644 --- a/ec/acc/datasource_tags_test.go +++ b/ec/acc/datasource_tags_test.go @@ -44,7 +44,7 @@ func TestAccDatasource_basic_tags(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_autoscaling_test.go b/ec/acc/deployment_autoscaling_test.go index 84b8d8f86..1b93ea8de 100644 --- 
a/ec/acc/deployment_autoscaling_test.go +++ b/ec/acc/deployment_autoscaling_test.go @@ -38,7 +38,7 @@ func TestAccDeployment_autoscaling(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_basic_defaults_test.go b/ec/acc/deployment_basic_defaults_test.go index 9de4f7e2c..5aa745471 100644 --- a/ec/acc/deployment_basic_defaults_test.go +++ b/ec/acc/deployment_basic_defaults_test.go @@ -43,7 +43,7 @@ func TestAccDeployment_basic_defaults(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { @@ -162,7 +162,7 @@ func TestAccDeployment_basic_defaults_hw(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_basic_tags_test.go b/ec/acc/deployment_basic_tags_test.go index 80fbe6caa..e74719be2 100644 --- a/ec/acc/deployment_basic_tags_test.go +++ b/ec/acc/deployment_basic_tags_test.go @@ -43,7 +43,7 @@ func TestAccDeployment_basic_tags(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_basic_test.go b/ec/acc/deployment_basic_test.go index 80987c676..42e35d401 100644 --- a/ec/acc/deployment_basic_test.go +++ 
b/ec/acc/deployment_basic_test.go @@ -43,7 +43,7 @@ func TestAccDeployment_basic_tf(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { @@ -99,7 +99,7 @@ func TestAccDeployment_basic_config(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_ccs_test.go b/ec/acc/deployment_ccs_test.go index 16ff4f660..0edd4ffd5 100644 --- a/ec/acc/deployment_ccs_test.go +++ b/ec/acc/deployment_ccs_test.go @@ -44,7 +44,7 @@ func TestAccDeployment_ccs(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_compute_optimized_test.go b/ec/acc/deployment_compute_optimized_test.go index 6313e6633..88b97884e 100644 --- a/ec/acc/deployment_compute_optimized_test.go +++ b/ec/acc/deployment_compute_optimized_test.go @@ -34,7 +34,7 @@ func TestAccDeployment_computeOptimized(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_dedicated_test.go b/ec/acc/deployment_dedicated_test.go index 202caf5f4..2ca44dfe1 100644 --- a/ec/acc/deployment_dedicated_test.go +++ b/ec/acc/deployment_dedicated_test.go @@ -32,7 +32,7 @@ func 
TestAccDeployment_dedicated_coordinating(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { @@ -79,7 +79,7 @@ func TestAccDeployment_dedicated_master(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_docker_image_override_test.go b/ec/acc/deployment_docker_image_override_test.go index d977774f9..7ef00c38b 100644 --- a/ec/acc/deployment_docker_image_override_test.go +++ b/ec/acc/deployment_docker_image_override_test.go @@ -45,7 +45,7 @@ func TestAccDeployment_docker_image_override(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_elasticsearch_kesytore_test.go b/ec/acc/deployment_elasticsearch_kesytore_test.go index 01a7f9207..aafcc0eee 100644 --- a/ec/acc/deployment_elasticsearch_kesytore_test.go +++ b/ec/acc/deployment_elasticsearch_kesytore_test.go @@ -48,7 +48,7 @@ func TestAccDeploymentElasticsearchKeystore_full(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: resource.ComposeAggregateTestCheckFunc( testAccDeploymentDestroy, testAccDeploymentElasticsearchKeystoreDestroy, diff --git a/ec/acc/deployment_emptyconf_test.go b/ec/acc/deployment_emptyconf_test.go index 
96126adca..e9532d6ae 100644 --- a/ec/acc/deployment_emptyconf_test.go +++ b/ec/acc/deployment_emptyconf_test.go @@ -37,7 +37,7 @@ func TestAccDeployment_emptyconfig(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_enterprise_search_test.go b/ec/acc/deployment_enterprise_search_test.go index 51a3b4b96..3570b2485 100644 --- a/ec/acc/deployment_enterprise_search_test.go +++ b/ec/acc/deployment_enterprise_search_test.go @@ -34,7 +34,7 @@ func TestAccDeployment_enterpriseSearch(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_extension_basic_test.go b/ec/acc/deployment_extension_basic_test.go index 8395880f2..7696d6a97 100644 --- a/ec/acc/deployment_extension_basic_test.go +++ b/ec/acc/deployment_extension_basic_test.go @@ -35,7 +35,7 @@ func TestAccDeploymentExtension_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccExtensionDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_extension_bundle_file_test.go b/ec/acc/deployment_extension_bundle_file_test.go index 25f8cc789..23f71a039 100644 --- a/ec/acc/deployment_extension_bundle_file_test.go +++ b/ec/acc/deployment_extension_bundle_file_test.go @@ -45,7 +45,7 @@ func TestAccDeploymentExtension_bundleFile(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - 
ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccExtensionDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_extension_plugin_download_test.go b/ec/acc/deployment_extension_plugin_download_test.go index 807838842..d73175cd4 100644 --- a/ec/acc/deployment_extension_plugin_download_test.go +++ b/ec/acc/deployment_extension_plugin_download_test.go @@ -35,7 +35,7 @@ func TestAccDeploymentExtension_pluginDownload(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccExtensionDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_failed_upgrade_retry_test.go b/ec/acc/deployment_failed_upgrade_retry_test.go index 49d4d0505..318c5a74c 100644 --- a/ec/acc/deployment_failed_upgrade_retry_test.go +++ b/ec/acc/deployment_failed_upgrade_retry_test.go @@ -33,7 +33,7 @@ func TestAccDeployment_failed_upgrade_retry(t *testing.T) { resName := "ec_deployment.upgrade_retry" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_hotwarm_test.go b/ec/acc/deployment_hotwarm_test.go index 82f3d81ad..3ca41cf9d 100644 --- a/ec/acc/deployment_hotwarm_test.go +++ b/ec/acc/deployment_hotwarm_test.go @@ -39,7 +39,7 @@ func TestAccDeployment_hotwarm(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_integrations_server_test.go 
b/ec/acc/deployment_integrations_server_test.go index 34f7a4f55..8b000869d 100644 --- a/ec/acc/deployment_integrations_server_test.go +++ b/ec/acc/deployment_integrations_server_test.go @@ -34,7 +34,7 @@ func TestAccDeployment_integrationsServer(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_memory_optimized_test.go b/ec/acc/deployment_memory_optimized_test.go index bb3961cef..d2d2be8ab 100644 --- a/ec/acc/deployment_memory_optimized_test.go +++ b/ec/acc/deployment_memory_optimized_test.go @@ -34,7 +34,7 @@ func TestAccDeployment_memoryOptimized(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_observability_self_test.go b/ec/acc/deployment_observability_self_test.go index f02ad5e9b..7b6c9a07c 100644 --- a/ec/acc/deployment_observability_self_test.go +++ b/ec/acc/deployment_observability_self_test.go @@ -34,7 +34,7 @@ func TestAccDeployment_observability_createWithSelfObservability(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_observability_test.go b/ec/acc/deployment_observability_test.go index 050ad7ada..75e6aaa3e 100644 --- a/ec/acc/deployment_observability_test.go +++ b/ec/acc/deployment_observability_test.go @@ -41,7 +41,7 @@ func TestAccDeployment_observability(t *testing.T) { 
resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_observability_tpl_test.go b/ec/acc/deployment_observability_tpl_test.go index a5d343afb..dbcebb6ab 100644 --- a/ec/acc/deployment_observability_tpl_test.go +++ b/ec/acc/deployment_observability_tpl_test.go @@ -34,7 +34,7 @@ func TestAccDeployment_observabilityTpl(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_post_node_role_upgrade_test.go b/ec/acc/deployment_post_node_role_upgrade_test.go index f60912224..01caca25a 100644 --- a/ec/acc/deployment_post_node_role_upgrade_test.go +++ b/ec/acc/deployment_post_node_role_upgrade_test.go @@ -38,7 +38,7 @@ func TestAccDeployment_post_node_roles(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_pre_node_role_migration_test.go b/ec/acc/deployment_pre_node_role_migration_test.go index be045253f..ea1b0b61a 100644 --- a/ec/acc/deployment_pre_node_role_migration_test.go +++ b/ec/acc/deployment_pre_node_role_migration_test.go @@ -39,7 +39,7 @@ func TestAccDeployment_pre_node_roles(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: 
[]resource.TestStep{ { diff --git a/ec/acc/deployment_security_test.go b/ec/acc/deployment_security_test.go index 2fc3ea281..aa015d3f1 100644 --- a/ec/acc/deployment_security_test.go +++ b/ec/acc/deployment_security_test.go @@ -34,7 +34,7 @@ func TestAccDeployment_security(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_snapshot_test.go b/ec/acc/deployment_snapshot_test.go index 0fb391521..f8b91d78b 100644 --- a/ec/acc/deployment_snapshot_test.go +++ b/ec/acc/deployment_snapshot_test.go @@ -39,7 +39,7 @@ func TestAccDeployment_snapshot_restore(t *testing.T) { var esCreds creds resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { diff --git a/ec/acc/deployment_traffic_filter_association_test.go b/ec/acc/deployment_traffic_filter_association_test.go index ab657ad1e..161d8189e 100644 --- a/ec/acc/deployment_traffic_filter_association_test.go +++ b/ec/acc/deployment_traffic_filter_association_test.go @@ -39,7 +39,7 @@ func TestAccDeploymentTrafficFilterAssociation_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentTrafficFilterDestroy, Steps: []resource.TestStep{ { @@ -116,7 +116,7 @@ terraform { }, { PlanOnly: true, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, Config: ignoreChangesCfg, Check: checkBasicDeploymentTrafficFilterAssociationResource( resName, resAssocName, 
randomName, diff --git a/ec/acc/deployment_traffic_filter_test.go b/ec/acc/deployment_traffic_filter_test.go index 86e792d16..c2fd78adf 100644 --- a/ec/acc/deployment_traffic_filter_test.go +++ b/ec/acc/deployment_traffic_filter_test.go @@ -39,7 +39,7 @@ func TestAccDeploymentTrafficFilter_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentTrafficFilterDestroy, Steps: []resource.TestStep{ { @@ -97,7 +97,7 @@ func TestAccDeploymentTrafficFilter_azure(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: testAccDeploymentTrafficFilterDestroy, Steps: []resource.TestStep{ { @@ -113,6 +113,56 @@ func TestAccDeploymentTrafficFilter_azure(t *testing.T) { }) } +func TestAccDeploymentTrafficFilter_UpgradeFrom0_4_1(t *testing.T) { + resName := "ec_deployment_traffic_filter.basic" + randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + startCfg := "testdata/deployment_traffic_filter_basic.tf" + cfg := fixtureAccDeploymentTrafficFilterResourceBasic(t, startCfg, randomName, getRegion()) + + // Required because of a bug - see https://discuss.hashicorp.com/t/acceptance-testing-sdk-framework-upgrade-issue/44166/2 + externalProviderConfig := ` +terraform { + required_providers { + ec = { + source = "elastic/ec" + version = "0.4.1" + } + } +}` + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + CheckDestroy: testAccDeploymentTrafficFilterDestroy, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "ec": { + VersionConstraint: "0.4.1", + Source: "elastic/ec", + }, + }, + Config: cfg + externalProviderConfig, + Check: 
checkBasicDeploymentTrafficFilterResource(resName, randomName, + resource.TestCheckResourceAttr(resName, "include_by_default", "false"), + resource.TestCheckResourceAttr(resName, "type", "ip"), + resource.TestCheckResourceAttr(resName, "rule.#", "1"), + resource.TestCheckResourceAttr(resName, "rule.0.source", "0.0.0.0/0"), + ), + }, + { + PlanOnly: true, + ProtoV6ProviderFactories: testAccProviderFactory, + Config: cfg, + Check: checkBasicDeploymentTrafficFilterResource(resName, randomName, + resource.TestCheckResourceAttr(resName, "include_by_default", "false"), + resource.TestCheckResourceAttr(resName, "type", "ip"), + resource.TestCheckResourceAttr(resName, "rule.#", "1"), + resource.TestCheckResourceAttr(resName, "rule.0.source", "0.0.0.0/0"), + ), + }, + }, + }) +} + func fixtureAccDeploymentTrafficFilterResourceBasic(t *testing.T, fileName, name, region string) string { t.Helper() b, err := os.ReadFile(fileName) diff --git a/ec/acc/deployment_with_extension_bundle_test.go b/ec/acc/deployment_with_extension_bundle_test.go index 203994f1e..f6b3174ac 100644 --- a/ec/acc/deployment_with_extension_bundle_test.go +++ b/ec/acc/deployment_with_extension_bundle_test.go @@ -45,7 +45,7 @@ func TestAccDeployment_withExtension(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - ProtoV5ProviderFactories: testAccProviderFactory, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: func(s *terraform.State) error { merr := multierror.NewPrefixed("checking resource with extension") diff --git a/ec/ecresource/trafficfilterassocresource/create.go b/ec/ecresource/trafficfilterassocresource/create.go index 3046ab256..7957271ad 100644 --- a/ec/ecresource/trafficfilterassocresource/create.go +++ b/ec/ecresource/trafficfilterassocresource/create.go @@ -28,13 +28,7 @@ import ( ) func (r Resource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { - // Prevent panic if the 
provider has not been configured. - if r.client == nil { - response.Diagnostics.AddError( - "Unconfigured API Client", - "Expected configured API client. Please report this issue to the provider developers.", - ) - + if !resourceReady(r, &response.Diagnostics) { return } diff --git a/ec/ecresource/trafficfilterassocresource/delete.go b/ec/ecresource/trafficfilterassocresource/delete.go index 0f2b2233d..c2fd7e321 100644 --- a/ec/ecresource/trafficfilterassocresource/delete.go +++ b/ec/ecresource/trafficfilterassocresource/delete.go @@ -28,13 +28,7 @@ import ( ) func (r Resource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { - // Prevent panic if the provider has not been configured. - if r.client == nil { - response.Diagnostics.AddError( - "Unconfigured API Client", - "Expected configured API client. Please report this issue to the provider developers.", - ) - + if !resourceReady(r, &response.Diagnostics) { return } diff --git a/ec/ecresource/trafficfilterassocresource/read.go b/ec/ecresource/trafficfilterassocresource/read.go index 633f34be3..588172115 100644 --- a/ec/ecresource/trafficfilterassocresource/read.go +++ b/ec/ecresource/trafficfilterassocresource/read.go @@ -28,13 +28,7 @@ import ( ) func (r Resource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { - // Prevent panic if the provider has not been configured. - if r.client == nil { - response.Diagnostics.AddError( - "Unconfigured API Client", - "Expected configured API client. 
Please report this issue to the provider developers.", - ) - + if !resourceReady(r, &response.Diagnostics) { return } diff --git a/ec/ecresource/trafficfilterassocresource/resource_test.go b/ec/ecresource/trafficfilterassocresource/resource_test.go index ca254494e..3cb131a34 100644 --- a/ec/ecresource/trafficfilterassocresource/resource_test.go +++ b/ec/ecresource/trafficfilterassocresource/resource_test.go @@ -23,7 +23,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-framework/providerserver" - "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" r "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/elastic/cloud-sdk-go/pkg/api" @@ -35,7 +35,7 @@ import ( func TestResourceTrafficFilterAssoc(t *testing.T) { r.UnitTest(t, r.TestCase{ - ProtoV5ProviderFactories: protoV5ProviderFactoriesWithMockClient( + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( api.NewMock( createResponse(), readResponse(), @@ -65,7 +65,7 @@ func TestResourceTrafficFilterAssoc(t *testing.T) { func TestResourceTrafficFilterAssoc_externalDeletion1(t *testing.T) { r.UnitTest(t, r.TestCase{ - ProtoV5ProviderFactories: protoV5ProviderFactoriesWithMockClient( + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( api.NewMock( createResponse(), readResponse(), @@ -86,9 +86,10 @@ func TestResourceTrafficFilterAssoc_externalDeletion1(t *testing.T) { }, }) } + func TestResourceTrafficFilterAssoc_externalDeletion2(t *testing.T) { r.UnitTest(t, r.TestCase{ - ProtoV5ProviderFactories: protoV5ProviderFactoriesWithMockClient( + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( api.NewMock( createResponse(), readResponse(), @@ -112,7 +113,7 @@ func TestResourceTrafficFilterAssoc_externalDeletion2(t *testing.T) { func TestResourceTrafficFilterAssoc_gracefulDeletion(t *testing.T) { r.UnitTest(t, r.TestCase{ - ProtoV5ProviderFactories: protoV5ProviderFactoriesWithMockClient( + 
ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( api.NewMock( createResponse(), readResponse(), @@ -135,7 +136,7 @@ func TestResourceTrafficFilterAssoc_gracefulDeletion(t *testing.T) { func TestResourceTrafficFilterAssoc_failedDeletion(t *testing.T) { r.UnitTest(t, r.TestCase{ - ProtoV5ProviderFactories: protoV5ProviderFactoriesWithMockClient( + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( api.NewMock( createResponse(), readResponse(), @@ -159,7 +160,7 @@ func TestResourceTrafficFilterAssoc_failedDeletion(t *testing.T) { func TestResourceTrafficFilterAssoc_importState(t *testing.T) { r.UnitTest(t, r.TestCase{ - ProtoV5ProviderFactories: protoV5ProviderFactoriesWithMockClient( + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( api.NewMock( readResponse(), ), @@ -303,6 +304,7 @@ func alreadyDeletedResponse() mock.Response { mock.NewStringBody(`{ }`), ) } + func failedDeletionResponse() mock.Response { mock.SampleInternalError() return mock.New500ResponseAssertion( @@ -317,10 +319,10 @@ func failedDeletionResponse() mock.Response { ) } -func protoV5ProviderFactoriesWithMockClient(client *api.API) map[string]func() (tfprotov5.ProviderServer, error) { - return map[string]func() (tfprotov5.ProviderServer, error){ - "ec": func() (tfprotov5.ProviderServer, error) { - return providerserver.NewProtocol5(ec.ProviderWithClient(client, "unit-tests"))(), nil +func protoV6ProviderFactoriesWithMockClient(client *api.API) map[string]func() (tfprotov6.ProviderServer, error) { + return map[string]func() (tfprotov6.ProviderServer, error){ + "ec": func() (tfprotov6.ProviderServer, error) { + return providerserver.NewProtocol6(ec.ProviderWithClient(client, "unit-tests"))(), nil }, } } diff --git a/ec/ecresource/trafficfilterassocresource/schema.go b/ec/ecresource/trafficfilterassocresource/schema.go index 84f1e3f7e..5ced1a99c 100644 --- a/ec/ecresource/trafficfilterassocresource/schema.go +++ 
b/ec/ecresource/trafficfilterassocresource/schema.go @@ -37,8 +37,6 @@ var _ resource.ResourceWithGetSchema = &Resource{} var _ resource.ResourceWithImportState = &Resource{} var _ resource.ResourceWithMetadata = &Resource{} -type ResourceType struct{} - const entityTypeDeployment = "deployment" func (r *Resource) GetSchema(_ context.Context) (tfsdk.Schema, diag.Diagnostics) { @@ -74,6 +72,18 @@ type Resource struct { client *api.API } +func resourceReady(r Resource, dg *diag.Diagnostics) bool { + if r.client == nil { + dg.AddError( + "Unconfigured API Client", + "Expected configured API client. Please report this issue to the provider developers.", + ) + + return false + } + return true +} + func (r *Resource) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) { client, diags := internal.ConvertProviderData(request.ProviderData) response.Diagnostics.Append(diags...) diff --git a/ec/ecresource/trafficfilterresource/create.go b/ec/ecresource/trafficfilterresource/create.go index 0e535b7bc..bc3f8614f 100644 --- a/ec/ecresource/trafficfilterresource/create.go +++ b/ec/ecresource/trafficfilterresource/create.go @@ -20,23 +20,51 @@ package trafficfilterresource import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" ) // Create will create a new deployment traffic filter ruleset -func create(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var client = meta.(*api.API) +func (r Resource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + if !resourceReady(r, &response.Diagnostics) { + return + } + + var newState modelV0 + + 
diags := request.Plan.Get(ctx, &newState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + trafficFilterRulesetRequest, diags := expandModel(ctx, newState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + res, err := trafficfilterapi.Create(trafficfilterapi.CreateParams{ - API: client, Req: expandModel(d), + API: r.client, Req: trafficFilterRulesetRequest, }) if err != nil { - return diag.FromErr(err) + response.Diagnostics.AddError(err.Error(), err.Error()) + return + } + + newState.ID = types.String{Value: *res.ID} + + found, diags := r.read(ctx, newState.ID.Value, &newState) + response.Diagnostics.Append(diags...) + if !found { + response.State.RemoveResource(ctx) + } + if response.Diagnostics.HasError() { + return } - d.SetId(*res.ID) - return read(ctx, d, meta) + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) } diff --git a/ec/ecresource/trafficfilterresource/delete.go b/ec/ecresource/trafficfilterresource/delete.go index 5723eace8..f5a2206b5 100644 --- a/ec/ecresource/trafficfilterresource/delete.go +++ b/ec/ecresource/trafficfilterresource/delete.go @@ -21,10 +21,8 @@ import ( "context" "errors" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/elastic/cloud-sdk-go/pkg/client/deployments_traffic_filter" @@ -32,43 +30,51 @@ import ( ) // Delete will delete an existing deployment traffic filter ruleset -func delete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var client = meta.(*api.API) +func (r Resource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + if !resourceReady(r, 
&response.Diagnostics) { + return + } + + var state modelV0 + + diags := request.State.Get(ctx, &state) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } res, err := trafficfilterapi.Get(trafficfilterapi.GetParams{ - API: client, ID: d.Id(), IncludeAssociations: true, + API: r.client, ID: state.ID.Value, IncludeAssociations: true, }) if err != nil { - if util.TrafficFilterNotFound(err) { - d.SetId("") - return nil + if !util.TrafficFilterNotFound(err) { + response.Diagnostics.AddError(err.Error(), err.Error()) } - return diag.FromErr(err) + return } for _, assoc := range res.Associations { if err := trafficfilterapi.DeleteAssociation(trafficfilterapi.DeleteAssociationParams{ - API: client, - ID: d.Id(), + API: r.client, + ID: state.ID.Value, EntityID: *assoc.ID, EntityType: *assoc.EntityType, }); err != nil { if !associationDeleted(err) { - return diag.FromErr(err) + response.Diagnostics.AddError(err.Error(), err.Error()) + return } } } if err := trafficfilterapi.Delete(trafficfilterapi.DeleteParams{ - API: client, ID: d.Id(), + API: r.client, ID: state.ID.Value, }); err != nil { if !ruleDeleted(err) { - return diag.FromErr(err) + response.Diagnostics.AddError(err.Error(), err.Error()) + return } } - - d.SetId("") - return nil } func associationDeleted(err error) bool { diff --git a/ec/ecresource/trafficfilterresource/delete_test.go b/ec/ecresource/trafficfilterresource/delete_test.go deleted file mode 100644 index 451835014..000000000 --- a/ec/ecresource/trafficfilterresource/delete_test.go +++ /dev/null @@ -1,218 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package trafficfilterresource - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_delete(t *testing.T) { - tc500Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilter(), - Schema: newSchema(), - }) - wantTC500 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilter(), - Schema: newSchema(), - }) - - tc404Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilter(), - Schema: newSchema(), - }) - wantTC404 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilter(), - Schema: newSchema(), - }) - wantTC404.SetId("") - - tc404AssocErr := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilter(), - Schema: newSchema(), - }) - wantTC404Assoc := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilter(), - Schema: newSchema(), - }) - - tc404DeleteErr := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: 
newSampleTrafficFilter(), - Schema: newSchema(), - }) - wantTC404Delete := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilter(), - Schema: newSchema(), - }) - wantTC404Delete.SetId("") - - tc500DeleteErr := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilter(), - Schema: newSchema(), - }) - wantTC500Delete := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilter(), - Schema: newSchema(), - }) - type args struct { - ctx context.Context - d *schema.ResourceData - meta interface{} - } - tests := []struct { - name string - args args - want diag.Diagnostics - wantRD *schema.ResourceData - }{ - { - name: "returns an error when it receives a 500", - args: args{ - d: tc500Err, - meta: api.NewMock(mock.NewErrorResponse(500, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: diag.Diagnostics{ - { - Severity: diag.Error, - Summary: "api error: 1 error occurred:\n\t* some: message\n\n", - }, - }, - wantRD: wantTC500, - }, - { - name: "returns nil and unsets the state when the error is known", - args: args{ - d: tc404Err, - meta: api.NewMock(mock.NewErrorResponse(404, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: nil, - wantRD: wantTC404, - }, - { - name: "returns error when the error is unknown", - args: args{ - d: tc404AssocErr, - meta: api.NewMock( - mock.New200StructResponse(models.TrafficFilterRulesetInfo{ - Associations: []*models.FilterAssociation{ - {ID: ec.String("some id"), EntityType: ec.String("deployment")}, - }, - }), - mock.NewErrorResponse(500, mock.APIError{ - Code: "some", Message: "message", - }), - ), - }, - want: diag.Diagnostics{ - { - Summary: "api error: 1 error occurred:\n\t* some: message\n\n", - }, - }, - wantRD: wantTC404Assoc, - }, - { - name: "returns nil and unsets the state when the error is known", - args: args{ - d: tc404DeleteErr, - meta: 
api.NewMock( - mock.New200StructResponse(models.TrafficFilterRulesetInfo{ - Associations: []*models.FilterAssociation{ - {ID: ec.String("some id"), EntityType: ec.String("deployment")}, - }, - }), - mock.NewErrorResponse(404, mock.APIError{ - Code: "some", Message: "message", - }), - mock.New200StructResponse(map[string]interface{}{}), - ), - }, - want: nil, - wantRD: wantTC404Delete, - }, - { - name: "returns error when the delete returns a 500 error", - args: args{ - d: tc500DeleteErr, - meta: api.NewMock( - mock.New200StructResponse(models.TrafficFilterRulesetInfo{ - Associations: []*models.FilterAssociation{ - {ID: ec.String("some id"), EntityType: ec.String("deployment")}, - }, - }), - mock.NewErrorResponse(404, mock.APIError{ - Code: "some", Message: "message", - }), - mock.NewErrorResponse(500, mock.APIError{ - Code: "overload", Message: "server at capacity", - }), - ), - }, - want: diag.Diagnostics{ - { - Summary: "api error: 1 error occurred:\n\t* overload: server at capacity\n\n", - }, - }, - wantRD: wantTC500Delete, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := delete(tt.args.ctx, tt.args.d, tt.args.meta) - assert.Equal(t, tt.want, got) - var want interface{} - if tt.wantRD != nil { - if s := tt.wantRD.State(); s != nil { - want = s.Attributes - } - } - - var gotState interface{} - if s := tt.args.d.State(); s != nil { - gotState = s.Attributes - } - - assert.Equal(t, want, gotState) - }) - } -} diff --git a/ec/ecresource/trafficfilterresource/expanders.go b/ec/ecresource/trafficfilterresource/expanders.go index 2cb57a10e..a60a9721d 100644 --- a/ec/ecresource/trafficfilterresource/expanders.go +++ b/ec/ecresource/trafficfilterresource/expanders.go @@ -18,47 +18,54 @@ package trafficfilterresource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/elastic/cloud-sdk-go/pkg/models" 
"github.com/elastic/cloud-sdk-go/pkg/util/ec" ) -func expandModel(d *schema.ResourceData) *models.TrafficFilterRulesetRequest { - var ruleSet = d.Get("rule").(*schema.Set) +func expandModel(ctx context.Context, state modelV0) (*models.TrafficFilterRulesetRequest, diag.Diagnostics) { + var diags diag.Diagnostics + + ruleSet := make([]trafficFilterRuleModelV0, 0, len(state.Rule.Elems)) + diags.Append(state.Rule.ElementsAs(ctx, &ruleSet, false)...) + if diags.HasError() { + return nil, diags + } + var request = models.TrafficFilterRulesetRequest{ - Name: ec.String(d.Get("name").(string)), - Type: ec.String(d.Get("type").(string)), - Region: ec.String(d.Get("region").(string)), - Description: d.Get("description").(string), - IncludeByDefault: ec.Bool(d.Get("include_by_default").(bool)), - Rules: make([]*models.TrafficFilterRule, 0, ruleSet.Len()), + Name: ec.String(state.Name.Value), + Type: ec.String(state.Type.Value), + Region: ec.String(state.Region.Value), + Description: state.Description.Value, + IncludeByDefault: ec.Bool(state.IncludeByDefault.Value), + Rules: make([]*models.TrafficFilterRule, 0, len(ruleSet)), } - for _, r := range ruleSet.List() { - var m = r.(map[string]interface{}) + for _, r := range ruleSet { var rule = models.TrafficFilterRule{ - Source: m["source"].(string), + Source: r.Source.Value, } - if val, ok := m["id"]; ok { - rule.ID = val.(string) + if !r.ID.IsNull() && !r.ID.IsUnknown() { + rule.ID = r.ID.Value } - if val, ok := m["description"]; ok { - rule.Description = val.(string) + if !r.Description.IsNull() && !r.Description.IsUnknown() { + rule.Description = r.Description.Value } - if val, ok := m["azure_endpoint_name"]; ok { - rule.AzureEndpointName = val.(string) + if !r.AzureEndpointName.IsNull() && !r.AzureEndpointName.IsUnknown() { + rule.AzureEndpointName = r.AzureEndpointName.Value } - - if val, ok := m["azure_endpoint_guid"]; ok { - rule.AzureEndpointGUID = val.(string) + if !r.AzureEndpointGUID.IsNull() && 
!r.AzureEndpointGUID.IsUnknown() { + rule.AzureEndpointGUID = r.AzureEndpointGUID.Value } request.Rules = append(request.Rules, &rule) } - return &request + return &request, diags } diff --git a/ec/ecresource/trafficfilterresource/expanders_test.go b/ec/ecresource/trafficfilterresource/expanders_test.go index 9fb1746c7..d788eb302 100644 --- a/ec/ecresource/trafficfilterresource/expanders_test.go +++ b/ec/ecresource/trafficfilterresource/expanders_test.go @@ -18,49 +18,39 @@ package trafficfilterresource import ( + "context" "testing" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_expandModel(t *testing.T) { - trafficFilterRD := util.NewResourceData(t, util.ResDataParams{ - ID: "some-random-id", - State: newSampleTrafficFilter(), - Schema: newSchema(), - }) - trafficFilterMultipleRD := util.NewResourceData(t, util.ResDataParams{ - ID: "some-random-id", - State: map[string]interface{}{ - "name": "my traffic filter", - "type": "ip", - "include_by_default": false, - "region": "us-east-1", - "rule": []interface{}{ - map[string]interface{}{ - "source": "1.1.1.1/24", - }, - map[string]interface{}{ - "source": "1.1.1.0/16", - }, - map[string]interface{}{ - "source": "0.0.0.0/0", - }, - map[string]interface{}{ - "source": "1.1.1.1", - }, + trafficFilterRD := newSampleTrafficFilter("some-random-id") + + trafficFilterMultipleRD := modelV0{ + ID: types.String{Value: "some-random-id"}, + Name: types.String{Value: "my traffic filter"}, + Type: types.String{Value: "ip"}, + IncludeByDefault: types.Bool{Value: false}, + Region: types.String{Value: "us-east-1"}, + Rule: types.Set{ + ElemType: trafficFilterRuleElemType(), + Elems: []attr.Value{ + 
newSampleTrafficFilterRule("1.1.1.1/24", "", "", "", ""), + newSampleTrafficFilterRule("1.1.1.0/16", "", "", "", ""), + newSampleTrafficFilterRule("0.0.0.0/0", "", "", "", ""), + newSampleTrafficFilterRule("1.1.1.1", "", "", "", ""), }, }, - Schema: newSchema(), - }) + } type args struct { - d *schema.ResourceData + state modelV0 } tests := []struct { name string @@ -69,50 +59,51 @@ func Test_expandModel(t *testing.T) { }{ { name: "parses the resource", - args: args{d: trafficFilterRD}, + args: args{state: trafficFilterRD}, want: &models.TrafficFilterRulesetRequest{ Name: ec.String("my traffic filter"), Type: ec.String("ip"), IncludeByDefault: ec.Bool(false), Region: ec.String("us-east-1"), Rules: []*models.TrafficFilterRule{ - {Source: "0.0.0.0/0"}, {Source: "1.1.1.1"}, + {Source: "0.0.0.0/0"}, }, }, }, { name: "parses the resource with a lot of traffic rules", - args: args{d: trafficFilterMultipleRD}, + args: args{state: trafficFilterMultipleRD}, want: &models.TrafficFilterRulesetRequest{ Name: ec.String("my traffic filter"), Type: ec.String("ip"), IncludeByDefault: ec.Bool(false), Region: ec.String("us-east-1"), Rules: []*models.TrafficFilterRule{ + {Source: "1.1.1.1/24"}, + {Source: "1.1.1.0/16"}, {Source: "0.0.0.0/0"}, {Source: "1.1.1.1"}, - {Source: "1.1.1.0/16"}, - {Source: "1.1.1.1/24"}, }, }, }, { name: "parses an Azure privatelink resource", - args: args{d: util.NewResourceData(t, util.ResDataParams{ - ID: "some-random-id", - State: map[string]interface{}{ - "name": "my traffic filter", - "type": "azure_private_endpoint", - "include_by_default": false, - "region": "azure-australiaeast", - "rule": []interface{}{map[string]interface{}{ - "azure_endpoint_guid": "1231312-1231-1231-1231-1231312", - "azure_endpoint_name": "my-azure-pl", - }}, + args: args{ + state: modelV0{ + ID: types.String{Value: "some-random-id"}, + Name: types.String{Value: "my traffic filter"}, + Type: types.String{Value: "azure_private_endpoint"}, + IncludeByDefault: types.Bool{Value: 
false}, + Region: types.String{Value: "azure-australiaeast"}, + Rule: types.Set{ + ElemType: trafficFilterRuleElemType(), + Elems: []attr.Value{ + newSampleTrafficFilterRule("", "", "my-azure-pl", "1231312-1231-1231-1231-1231312", ""), + }, + }, }, - Schema: newSchema(), - })}, + }, want: &models.TrafficFilterRulesetRequest{ Name: ec.String("my traffic filter"), Type: ec.String("azure_private_endpoint"), @@ -129,7 +120,8 @@ func Test_expandModel(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := expandModel(tt.args.d) + got, diags := expandModel(context.Background(), tt.args.state) + assert.Empty(t, diags) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecresource/trafficfilterresource/flatteners.go b/ec/ecresource/trafficfilterresource/flatteners.go index 2950da74b..d44c24a9e 100644 --- a/ec/ecresource/trafficfilterresource/flatteners.go +++ b/ec/ecresource/trafficfilterresource/flatteners.go @@ -18,67 +18,66 @@ package trafficfilterresource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/cloud-sdk-go/pkg/models" ) -func modelToState(d *schema.ResourceData, res *models.TrafficFilterRulesetInfo) error { - if err := d.Set("name", *res.Name); err != nil { - return err - } - - if err := d.Set("region", *res.Region); err != nil { - return err - } +func modelToState(ctx context.Context, res *models.TrafficFilterRulesetInfo, state *modelV0) diag.Diagnostics { + var diags diag.Diagnostics - if err := d.Set("type", *res.Type); err != nil { - return err - } - - if err := d.Set("rule", flattenRules(res.Rules)); err != nil { - return err - } + state.Name = types.String{Value: *res.Name} + state.Region = types.String{Value: *res.Region} + state.Type = types.String{Value: *res.Type} + state.IncludeByDefault = 
types.Bool{Value: *res.IncludeByDefault} - if err := d.Set("include_by_default", res.IncludeByDefault); err != nil { - return err - } + diags.Append(flattenRules(ctx, res.Rules, &state.Rule)...) - if res.Description != "" { - if err := d.Set("description", res.Description); err != nil { - return err - } + if res.Description == "" { + state.Description = types.String{Null: true} + } else { + state.Description = types.String{Value: res.Description} } - return nil + return diags } -func flattenRules(rules []*models.TrafficFilterRule) *schema.Set { - result := schema.NewSet(trafficFilterRuleHash, []interface{}{}) +func flattenRules(ctx context.Context, rules []*models.TrafficFilterRule, target interface{}) diag.Diagnostics { + var diags diag.Diagnostics + var result = make([]trafficFilterRuleModelV0, 0, len(rules)) for _, rule := range rules { - var m = make(map[string]interface{}) - if rule.Source != "" { - m["source"] = rule.Source + model := trafficFilterRuleModelV0{ + ID: types.String{Value: rule.ID}, + Source: types.String{Null: true}, + Description: types.String{Null: true}, + AzureEndpointGUID: types.String{Null: true}, + AzureEndpointName: types.String{Null: true}, } - if rule.Description != "" { - m["description"] = rule.Description + if rule.Source != "" { + model.Source = types.String{Value: rule.Source} } - if rule.ID != "" { - m["id"] = rule.ID + if rule.Description != "" { + model.Description = types.String{Value: rule.Description} } if rule.AzureEndpointGUID != "" { - m["azure_endpoint_guid"] = rule.AzureEndpointGUID + model.AzureEndpointGUID = types.String{Value: rule.AzureEndpointGUID} } if rule.AzureEndpointName != "" { - m["azure_endpoint_name"] = rule.AzureEndpointName + model.AzureEndpointName = types.String{Value: rule.AzureEndpointName} } - result.Add(m) + result = append(result, model) } - return result + diags.Append(tfsdk.ValueFrom(ctx, result, trafficFilterRuleSetType(), target)...) 
+ + return diags } diff --git a/ec/ecresource/trafficfilterresource/flatteners_test.go b/ec/ecresource/trafficfilterresource/flatteners_test.go index a85e52f4f..a8e014769 100644 --- a/ec/ecresource/trafficfilterresource/flatteners_test.go +++ b/ec/ecresource/trafficfilterresource/flatteners_test.go @@ -18,21 +18,19 @@ package trafficfilterresource import ( + "context" "testing" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_modelToState(t *testing.T) { - trafficFilterSchemaArg := schema.TestResourceDataRaw(t, newSchema(), nil) - trafficFilterSchemaArg.SetId("some-random-id") - remoteState := models.TrafficFilterRulesetInfo{ ID: ec.String("some-random-id"), Name: ec.String("my traffic filter"), @@ -45,9 +43,6 @@ func Test_modelToState(t *testing.T) { }, } - trafficFilterSchemaArgMultipleR := schema.TestResourceDataRaw(t, newSchema(), nil) - trafficFilterSchemaArgMultipleR.SetId("some-random-id") - remoteStateMultipleRules := models.TrafficFilterRulesetInfo{ ID: ec.String("some-random-id"), Name: ec.String("my traffic filter"), @@ -62,15 +57,13 @@ func Test_modelToState(t *testing.T) { }, } - trafficFilterSchemaArgMultipleRWithDesc := schema.TestResourceDataRaw(t, newSchema(), nil) - trafficFilterSchemaArgMultipleRWithDesc.SetId("some-random-id") - remoteStateMultipleRulesWithDesc := models.TrafficFilterRulesetInfo{ ID: ec.String("some-random-id"), Name: ec.String("my traffic filter"), Type: ec.String("ip"), IncludeByDefault: ec.Bool(false), Region: ec.String("us-east-1"), + Description: *ec.String("Allows access to some network, a specific IP and all internet traffic"), Rules: []*models.TrafficFilterRule{ {Source: "1.1.1.0/16", 
Description: "some network"}, {Source: "1.1.1.1/24", Description: "a specific IP"}, @@ -78,64 +71,42 @@ func Test_modelToState(t *testing.T) { }, } - wantTrafficFilter := util.NewResourceData(t, util.ResDataParams{ - ID: "some-random-id", - State: newSampleTrafficFilter(), - Schema: newSchema(), - }) - wantTrafficFilterMultipleR := util.NewResourceData(t, util.ResDataParams{ - ID: "some-random-id", - State: map[string]interface{}{ - "name": "my traffic filter", - "type": "ip", - "include_by_default": false, - "region": "us-east-1", - "rule": []interface{}{ - map[string]interface{}{ - "source": "1.1.1.1/24", - }, - map[string]interface{}{ - "source": "1.1.1.0/16", - }, - map[string]interface{}{ - "source": "0.0.0.0/0", - }, - map[string]interface{}{ - "source": "1.1.1.1", - }, + want := newSampleTrafficFilter("some-random-id") + wantMultipleRules := modelV0{ + ID: types.String{Value: "some-random-id"}, + Name: types.String{Value: "my traffic filter"}, + Type: types.String{Value: "ip"}, + IncludeByDefault: types.Bool{Value: false}, + Region: types.String{Value: "us-east-1"}, + Description: types.String{Null: true}, + Rule: types.Set{ + ElemType: trafficFilterRuleElemType(), + Elems: []attr.Value{ + newSampleTrafficFilterRule("1.1.1.0/16", "", "", "", ""), + newSampleTrafficFilterRule("1.1.1.1/24", "", "", "", ""), + newSampleTrafficFilterRule("0.0.0.0/0", "", "", "", ""), + newSampleTrafficFilterRule("1.1.1.1", "", "", "", ""), }, }, - Schema: newSchema(), - }) - wantTrafficFilterMultipleRWithDesc := util.NewResourceData(t, util.ResDataParams{ - ID: "some-random-id", - State: map[string]interface{}{ - "name": "my traffic filter", - "type": "ip", - "include_by_default": false, - "region": "us-east-1", - "rule": []interface{}{ - map[string]interface{}{ - "source": "1.1.1.1/24", - "description": "a specific IP", - }, - map[string]interface{}{ - "source": "1.1.1.0/16", - "description": "some network", - }, - map[string]interface{}{ - "source": "0.0.0.0/0", - 
"description": "all internet traffic", - }, + } + wantMultipleRulesWithDesc := modelV0{ + ID: types.String{Value: "some-random-id"}, + Name: types.String{Value: "my traffic filter"}, + Type: types.String{Value: "ip"}, + IncludeByDefault: types.Bool{Value: false}, + Region: types.String{Value: "us-east-1"}, + Description: types.String{Value: "Allows access to some network, a specific IP and all internet traffic"}, + Rule: types.Set{ + ElemType: trafficFilterRuleElemType(), + Elems: []attr.Value{ + newSampleTrafficFilterRule("1.1.1.0/16", "some network", "", "", ""), + newSampleTrafficFilterRule("1.1.1.1/24", "a specific IP", "", "", ""), + newSampleTrafficFilterRule("0.0.0.0/0", "all internet traffic", "", "", ""), }, }, - Schema: newSchema(), - }) - - azurePLSchemaArg := schema.TestResourceDataRaw(t, newSchema(), nil) - azurePLSchemaArg.SetId("some-random-id") + } - azurePLRemoteState := models.TrafficFilterRulesetInfo{ + remoteStateAzurePL := models.TrafficFilterRulesetInfo{ ID: ec.String("some-random-id"), Name: ec.String("my traffic filter"), Type: ec.String("azure_private_endpoint"), @@ -149,60 +120,66 @@ func Test_modelToState(t *testing.T) { }, } + wantAzurePL := modelV0{ + ID: types.String{Value: "some-random-id"}, + Name: types.String{Value: "my traffic filter"}, + Type: types.String{Value: "azure_private_endpoint"}, + IncludeByDefault: types.Bool{Value: false}, + Region: types.String{Value: "azure-australiaeast"}, + Description: types.String{Null: true}, + Rule: types.Set{ + ElemType: trafficFilterRuleElemType(), + Elems: []attr.Value{ + newSampleTrafficFilterRule("", "", "my-azure-pl", "1231312-1231-1231-1231-1231312", ""), + }, + }, + } + type args struct { - d *schema.ResourceData - res *models.TrafficFilterRulesetInfo + in *models.TrafficFilterRulesetInfo } + tests := []struct { name string args args err error - want *schema.ResourceData + want modelV0 }{ { name: "flattens the resource", - args: args{d: trafficFilterSchemaArg, res: &remoteState}, - 
want: wantTrafficFilter, + args: args{in: &remoteState}, + want: want, }, { name: "flattens the resource with multiple rules", - args: args{d: trafficFilterSchemaArgMultipleR, res: &remoteStateMultipleRules}, - want: wantTrafficFilterMultipleR, + args: args{in: &remoteStateMultipleRules}, + want: wantMultipleRules, }, { name: "flattens the resource with multiple rules with descriptions", - args: args{d: trafficFilterSchemaArgMultipleRWithDesc, res: &remoteStateMultipleRulesWithDesc}, - want: wantTrafficFilterMultipleRWithDesc, + args: args{in: &remoteStateMultipleRulesWithDesc}, + want: wantMultipleRulesWithDesc, }, { name: "flattens the resource with multiple rules with descriptions", - args: args{d: azurePLSchemaArg, res: &azurePLRemoteState}, - want: util.NewResourceData(t, util.ResDataParams{ - ID: "some-random-id", - State: map[string]interface{}{ - "name": "my traffic filter", - "type": "azure_private_endpoint", - "include_by_default": false, - "region": "azure-australiaeast", - "rule": []interface{}{map[string]interface{}{ - "azure_endpoint_guid": "1231312-1231-1231-1231-1231312", - "azure_endpoint_name": "my-azure-pl", - }}, - }, - Schema: newSchema(), - }), + args: args{in: &remoteStateAzurePL}, + want: wantAzurePL, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := modelToState(tt.args.d, tt.args.res) + state := modelV0{ + ID: types.String{Value: "some-random-id"}, + } + diags := modelToState(context.Background(), tt.args.in, &state) + if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) + assert.Equal(t, diags, tt.err) } else { - assert.NoError(t, err) + assert.Empty(t, diags) } - assert.Equal(t, tt.want.State().Attributes, tt.args.d.State().Attributes) + assert.Equal(t, tt.want, state) }) } } diff --git a/ec/ecresource/trafficfilterresource/read.go b/ec/ecresource/trafficfilterresource/read.go index 1570508b9..520f05e8c 100644 --- a/ec/ecresource/trafficfilterresource/read.go +++ 
b/ec/ecresource/trafficfilterresource/read.go @@ -20,34 +20,55 @@ package trafficfilterresource import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) -// Read queries the remote deployment traffic filter ruleset state and update +// Read queries the remote deployment traffic filter ruleset state and updates // the local state. -func read(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var client = meta.(*api.API) +func (r Resource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + if !resourceReady(r, &response.Diagnostics) { + return + } + + var newState modelV0 + + diags := request.State.Get(ctx, &newState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + found, diags := r.read(ctx, newState.ID.Value, &newState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + if !found { + response.State.RemoveResource(ctx) + return + } + + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) 
+} + +func (r Resource) read(ctx context.Context, id string, state *modelV0) (found bool, diags diag.Diagnostics) { res, err := trafficfilterapi.Get(trafficfilterapi.GetParams{ - API: client, ID: d.Id(), + API: r.client, ID: id, IncludeAssociations: false, }) if err != nil { if util.TrafficFilterNotFound(err) { - d.SetId("") - return nil + return false, diags } - return diag.FromErr(err) - } - - if err := modelToState(d, res); err != nil { - return diag.FromErr(err) + diags.AddError(err.Error(), err.Error()) + return true, diags } - return nil + diags.Append(modelToState(ctx, res, state)...) + return true, diags } diff --git a/ec/ecresource/trafficfilterresource/read_test.go b/ec/ecresource/trafficfilterresource/read_test.go deleted file mode 100644 index ec765c9a5..000000000 --- a/ec/ecresource/trafficfilterresource/read_test.go +++ /dev/null @@ -1,116 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package trafficfilterresource - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_read(t *testing.T) { - tc500Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilter(), - Schema: newSchema(), - }) - wantTC500 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilter(), - Schema: newSchema(), - }) - - tc404Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilter(), - Schema: newSchema(), - }) - wantTC404 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleTrafficFilter(), - Schema: newSchema(), - }) - wantTC404.SetId("") - type args struct { - in0 context.Context - d *schema.ResourceData - meta interface{} - } - tests := []struct { - name string - args args - want diag.Diagnostics - wantRD *schema.ResourceData - }{ - { - name: "returns an error when it receives a 500", - args: args{ - d: tc500Err, - meta: api.NewMock(mock.NewErrorResponse(500, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: diag.Diagnostics{ - { - Severity: diag.Error, - Summary: "api error: 1 error occurred:\n\t* some: message\n\n", - }, - }, - wantRD: wantTC500, - }, - { - name: "returns nil and unsets the state when the error is known", - args: args{ - d: tc404Err, - meta: api.NewMock(mock.NewErrorResponse(404, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: nil, - wantRD: wantTC404, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := read(tt.args.in0, tt.args.d, tt.args.meta) - assert.Equal(t, tt.want, got) - 
var want interface{} - if tt.wantRD != nil { - if s := tt.wantRD.State(); s != nil { - want = s.Attributes - } - } - - var gotState interface{} - if s := tt.args.d.State(); s != nil { - gotState = s.Attributes - } - - assert.Equal(t, want, gotState) - }) - } -} diff --git a/ec/ecresource/trafficfilterresource/resource.go b/ec/ecresource/trafficfilterresource/resource.go deleted file mode 100644 index 25ebfa288..000000000 --- a/ec/ecresource/trafficfilterresource/resource.go +++ /dev/null @@ -1,44 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package trafficfilterresource - -import ( - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// Resource returns the ec_deployment_traffic_filter resource schema. 
-func Resource() *schema.Resource { - return &schema.Resource{ - Description: "Elastic Cloud deployment traffic filtering rules", - Schema: newSchema(), - - CreateContext: create, - ReadContext: read, - UpdateContext: update, - DeleteContext: delete, - - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(10 * time.Minute), - }, - } -} diff --git a/ec/ecresource/trafficfilterresource/resource_test.go b/ec/ecresource/trafficfilterresource/resource_test.go new file mode 100644 index 000000000..ac39c0cdd --- /dev/null +++ b/ec/ecresource/trafficfilterresource/resource_test.go @@ -0,0 +1,636 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package trafficfilterresource_test + +import ( + "net/url" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + r "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + + provider "github.com/elastic/terraform-provider-ec/ec" +) + +func TestResourceTrafficFilter(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse("true"), + readResponse("false", "true"), + readResponse("false", "true"), + readResponse("false", "true"), + readResponse("false", "true"), + readResponse("false", "true"), + updateResponse("false"), + readResponse("false", "false"), + readResponse("false", "false"), + readResponse("false", "false"), + readResponse("true", "false"), + deleteResponse(), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: trafficFilter, + Check: checkResource("true"), + }, + { // Ensure that it can be successfully read + PlanOnly: true, + Config: trafficFilter, + Check: checkResource("true"), + }, + { // Ensure that it can be successfully updated + Config: trafficFilterWithoutIncludeByDefault, + Check: checkResource("false"), + }, + { // Delete resource + Destroy: true, + Config: trafficFilter, + }, + }, + }) +} + +func TestResourceTrafficFilterWithoutIncludeByDefault(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse("false"), + readResponse("false", "false"), + readResponse("false", "false"), + readResponse("false", "false"), + readResponse("false", "false"), + readResponse("false", "false"), + readResponse("true", "false"), + deleteResponse(), + ), + ), + Steps: []r.TestStep{ + { // 
Create resource + Config: trafficFilterWithoutIncludeByDefault, + Check: checkResource("false"), + }, + { // Ensure that it can be successfully read + PlanOnly: true, + Config: trafficFilterWithoutIncludeByDefault, + Check: checkResource("false"), + }, + { // Delete resource + Destroy: true, + Config: trafficFilterWithoutIncludeByDefault, + }, + }, + }) +} + +func TestResourceTrafficFilter_failedRead1(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse("true"), + readResponse("false", "true"), + failedReadResponse("false"), + notFoundReadResponse("true"), // required for cleanup + ), + ), + Steps: []r.TestStep{ + { + Config: trafficFilter, + ExpectError: regexp.MustCompile(`internal.server.error: There was an internal server error`), + }, + }, + }) +} + +func TestResourceTrafficFilter_gracefulDeletionOnUpdate(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse("true"), + readResponse("false", "true"), + readResponse("false", "true"), + readResponse("false", "true"), + updateResponse("false"), + notFoundReadResponse("false"), + notFoundReadResponse("false"), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: trafficFilter, + Check: checkResource("true"), + }, + { // Update resource + Config: trafficFilterWithoutIncludeByDefault, + Check: checkResource("false"), // Update can't remove the resource, so it should stay the same. + ExpectNonEmptyPlan: true, // terraform refresh will detect the removed resource, so we will end up with a non-empty plan. 
+ }, + }, + }) +} + +func TestResourceTrafficFilter_failedUpdate1(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse("true"), + readResponse("false", "true"), + readResponse("false", "true"), + readResponse("false", "true"), + failedUpdateResponse("false"), + notFoundReadResponse("true"), // required for cleanup + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: trafficFilter, + Check: checkResource("true"), + }, + { // Update resource + Config: trafficFilterWithoutIncludeByDefault, + ExpectError: regexp.MustCompile(`internal.server.error: There was an internal server error`), + }, + }, + }) +} + +func TestResourceTrafficFilter_failedUpdate2(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse("true"), + readResponse("false", "true"), + readResponse("false", "true"), + readResponse("false", "true"), + updateResponse("false"), + failedReadResponse("false"), + notFoundReadResponse("true"), // required for cleanup + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: trafficFilter, + Check: checkResource("true"), + }, + { // Update resource + Config: trafficFilterWithoutIncludeByDefault, + ExpectError: regexp.MustCompile(`internal.server.error: There was an internal server error`), + }, + }, + }) +} + +func TestResourceTrafficFilterAssoc_gracefulDeletionOnRead(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse("true"), + readResponse("false", "true"), + readResponse("false", "true"), + notFoundReadResponse("false"), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: trafficFilter, + Check: checkResource("true"), + }, + { // Ensure that it gets unset if deleted externally + PlanOnly: true, + ExpectNonEmptyPlan: true, + Config: trafficFilter, + Check: 
checkResourceDeleted(), + }, + }, + }) +} + +func TestResourceTrafficFilter_gracefulDeletion1(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse("true"), + readResponse("false", "true"), + readResponse("false", "true"), + readResponse("false", "true"), + readResponse("true", "true"), + alreadyDeletedResponse(), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: trafficFilter, + Check: checkResource("true"), + }, + { // Delete resource + Destroy: true, + Config: trafficFilter, + }, + }, + }) +} + +func TestResourceTrafficFilter_gracefulDeletion2(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse("true"), + readResponse("false", "true"), + readResponse("false", "true"), + readResponse("false", "true"), + notFoundReadResponse("true"), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: trafficFilter, + Check: checkResource("true"), + }, + { // Delete resource + Destroy: true, + Config: trafficFilter, + }, + }, + }) +} + +func TestResourceTrafficFilter_failedDeletion1(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse("true"), + readResponse("false", "true"), + readResponse("false", "true"), + readResponse("false", "true"), + readResponse("true", "true"), + failedDeletionResponse(), + notFoundReadResponse("true"), // required for cleanup + ), + ), + Steps: []r.TestStep{ + { + Config: trafficFilter, + }, + { + Destroy: true, + Config: trafficFilter, + ExpectError: regexp.MustCompile(`internal.server.error: There was an internal server error`), + }, + }, + }) +} + +func TestResourceTrafficFilter_failedDeletion2(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse("true"), + 
readResponse("false", "true"), + readResponse("false", "true"), + readResponse("false", "true"), + failedReadResponse("true"), + readResponse("true", "true"), + deleteResponse(), + ), + ), + Steps: []r.TestStep{ + { + Config: trafficFilter, + }, + { + Destroy: true, + Config: trafficFilter, + ExpectError: regexp.MustCompile(`internal.server.error: There was an internal server error`), + }, + }, + }) +} + +func TestResourceTrafficFilter_deletionWithUnknownAssociationError(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse("true"), + readResponse("false", "true"), + readResponse("false", "true"), + readResponse("false", "true"), + mock.New200StructResponse(models.TrafficFilterRulesetInfo{ + Associations: []*models.FilterAssociation{ + {ID: ec.String("some id"), EntityType: ec.String("deployment")}, + }, + }), + mock.NewErrorResponse(500, mock.APIError{ + Code: "some", Message: "message", + }), + readResponse("true", "true"), + alreadyDeletedResponse(), + ), + ), + Steps: []r.TestStep{ + { + Config: trafficFilter, + }, + { + Destroy: true, + Config: trafficFilter, + ExpectError: regexp.MustCompile(`some: message`), + }, + }, + }) +} + +func TestResourceTrafficFilter_deletionWithAssociationNotFound(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse("true"), + readResponse("false", "true"), + readResponse("false", "true"), + readResponse("false", "true"), + mock.New200StructResponse(models.TrafficFilterRulesetInfo{ + Associations: []*models.FilterAssociation{ + {ID: ec.String("some id"), EntityType: ec.String("deployment")}, + }, + }), + mock.NewErrorResponse(404, mock.APIError{ + Code: "some", Message: "message", + }), + deleteResponse(), + ), + ), + Steps: []r.TestStep{ + { + Config: trafficFilter, + }, + { + Destroy: true, + Config: trafficFilter, + }, + }, + }) +} + +func 
TestResourceTrafficFilter_importState(t *testing.T) { + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + readResponse("false", "true"), + ), + ), + Steps: []r.TestStep{ + { + ImportState: true, + ImportStateId: "some-random-id", + ResourceName: "ec_deployment_traffic_filter.test1", + Config: trafficFilter, + Check: checkResource("true"), + }, + }, + }) +} + +const trafficFilter = ` + resource "ec_deployment_traffic_filter" "test1" { + name = "my traffic filter" + description = "Allow access from 1.1.1.1 and 1.1.1.0/16" + region = "us-east-1" + type = "ip" + + include_by_default = true + + rule { + source = "1.1.1.1" + } + rule { + source = "1.1.1.0/16" + } + } +` +const trafficFilterWithoutIncludeByDefault = ` + resource "ec_deployment_traffic_filter" "test1" { + name = "my traffic filter" + description = "Allow access from 1.1.1.1 and 1.1.1.0/16" + region = "us-east-1" + type = "ip" + + rule { + source = "1.1.1.1" + } + rule { + source = "1.1.1.0/16" + } + } +` + +func checkResource(includeByDefault string) r.TestCheckFunc { + resource := "ec_deployment_traffic_filter.test1" + return r.ComposeAggregateTestCheckFunc( + r.TestCheckResourceAttr(resource, "id", "some-random-id"), + r.TestCheckResourceAttr(resource, "name", "my traffic filter"), + r.TestCheckResourceAttr(resource, "description", "Allow access from 1.1.1.1 and 1.1.1.0/16"), + r.TestCheckResourceAttr(resource, "region", "us-east-1"), + r.TestCheckResourceAttr(resource, "type", "ip"), + r.TestCheckResourceAttr(resource, "include_by_default", includeByDefault), + r.TestCheckResourceAttr(resource, "rule.0.id", "some-random-rule-id-1"), + r.TestCheckResourceAttr(resource, "rule.0.source", "1.1.1.1"), + r.TestCheckResourceAttr(resource, "rule.1.id", "some-random-rule-id-2"), + r.TestCheckResourceAttr(resource, "rule.1.source", "1.1.1.0/16"), + ) +} + +func checkResourceDeleted() r.TestCheckFunc { + resource := "ec_deployment_traffic_filter.test1" + 
return r.ComposeAggregateTestCheckFunc( + r.TestCheckNoResourceAttr(resource, "id"), + r.TestCheckNoResourceAttr(resource, "name"), + r.TestCheckNoResourceAttr(resource, "description"), + r.TestCheckNoResourceAttr(resource, "region"), + r.TestCheckNoResourceAttr(resource, "type"), + r.TestCheckNoResourceAttr(resource, "include_by_default"), + r.TestCheckNoResourceAttr(resource, "rule.0.id"), + r.TestCheckNoResourceAttr(resource, "rule.0.source"), + r.TestCheckNoResourceAttr(resource, "rule.1.id"), + r.TestCheckNoResourceAttr(resource, "rule.1.source"), + ) +} + +func createResponse(includeByDefault string) mock.Response { + return mock.New201ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultWriteMockHeaders, + Method: "POST", + Path: "/api/v1/deployments/traffic-filter/rulesets", + Query: url.Values{}, + Body: mock.NewStringBody(`{"description":"Allow access from 1.1.1.1 and 1.1.1.0/16","include_by_default":` + includeByDefault + `,"name":"my traffic filter","region":"us-east-1","rules":[{"source":"1.1.1.0/16"},{"source":"1.1.1.1"}],"type":"ip"}` + "\n"), + }, + mock.NewStringBody(`{"id" : "some-random-id"}`), + ) +} + +func updateResponse(includeByDefault string) mock.Response { + return mock.New200ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultWriteMockHeaders, + Method: "PUT", + Path: "/api/v1/deployments/traffic-filter/rulesets/some-random-id", + Query: url.Values{}, + Body: mock.NewStringBody(`{"description":"Allow access from 1.1.1.1 and 1.1.1.0/16","include_by_default":` + includeByDefault + `,"name":"my traffic filter","region":"us-east-1","rules":[{"id":"some-random-rule-id-1","source":"1.1.1.1"},{"id":"some-random-rule-id-2","source":"1.1.1.0/16"}],"type":"ip"}` + "\n"), + }, + mock.NewStringBody(`{"id" : "some-random-id"}`), + ) +} + +func failedUpdateResponse(includeByDefault string) mock.Response { + return mock.New500ResponseAssertion( + 
&mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultWriteMockHeaders, + Method: "PUT", + Path: "/api/v1/deployments/traffic-filter/rulesets/some-random-id", + Query: url.Values{}, + Body: mock.NewStringBody(`{"description":"Allow access from 1.1.1.1 and 1.1.1.0/16","include_by_default":` + includeByDefault + `,"name":"my traffic filter","region":"us-east-1","rules":[{"id":"some-random-rule-id-1","source":"1.1.1.1"},{"id":"some-random-rule-id-2","source":"1.1.1.0/16"}],"type":"ip"}` + "\n"), + }, + mock.SampleInternalError().Response.Body, + ) +} + +func readResponse(includeAssociations string, includeByDefault string) mock.Response { + return mock.New200ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultReadMockHeaders, + Method: "GET", + Path: "/api/v1/deployments/traffic-filter/rulesets/some-random-id", + Query: url.Values{ + "include_associations": []string{includeAssociations}, + }, + }, + mock.NewStringBody(`{ + "id" : "some-random-id", + "name" : "my traffic filter", + "description" : "Allow access from 1.1.1.1 and 1.1.1.0/16", + "type": "ip", + "include_by_default": `+includeByDefault+`, + "region": "us-east-1", + "rules": [ + { + "id" : "some-random-rule-id-1", + "source" : "1.1.1.1" + }, + { + "id" : "some-random-rule-id-2", + "source" : "1.1.1.0/16" + } + ] + }`, + ), + ) +} + +func notFoundReadResponse(includeAssociations string) mock.Response { + return mock.New404ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultReadMockHeaders, + Method: "GET", + Path: "/api/v1/deployments/traffic-filter/rulesets/some-random-id", + Query: url.Values{ + "include_associations": []string{includeAssociations}, + }, + }, + mock.NewStringBody(`{ }`), + ) +} +func failedReadResponse(includeAssociations string) mock.Response { + return mock.New500ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultReadMockHeaders, + Method: 
"GET", + Path: "/api/v1/deployments/traffic-filter/rulesets/some-random-id", + Query: url.Values{ + "include_associations": []string{includeAssociations}, + }, + }, + mock.SampleInternalError().Response.Body, + ) +} + +func deleteResponse() mock.Response { + return mock.New200ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultReadMockHeaders, + Method: "DELETE", + Path: "/api/v1/deployments/traffic-filter/rulesets/some-random-id", + Query: url.Values{ + "ignore_associations": []string{"false"}, + }, + }, + mock.NewStringBody(`{}`), + ) +} + +func alreadyDeletedResponse() mock.Response { + return mock.New404ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultReadMockHeaders, + Method: "DELETE", + Path: "/api/v1/deployments/traffic-filter/rulesets/some-random-id", + Query: url.Values{ + "ignore_associations": []string{"false"}, + }, + }, + mock.NewStringBody(`{ }`), + ) +} + +func failedDeletionResponse() mock.Response { + mock.SampleInternalError() + return mock.New500ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultReadMockHeaders, + Method: "DELETE", + Path: "/api/v1/deployments/traffic-filter/rulesets/some-random-id", + Query: url.Values{ + "ignore_associations": []string{"false"}, + }, + }, + mock.SampleInternalError().Response.Body, + ) +} + +func protoV6ProviderFactoriesWithMockClient(client *api.API) map[string]func() (tfprotov6.ProviderServer, error) { + return map[string]func() (tfprotov6.ProviderServer, error){ + "ec": func() (tfprotov6.ProviderServer, error) { + return providerserver.NewProtocol6(provider.ProviderWithClient(client, "unit-tests"))(), nil + }, + } +} diff --git a/ec/ecresource/trafficfilterresource/schema.go b/ec/ecresource/trafficfilterresource/schema.go index 661acf248..5d5cb23d3 100644 --- a/ec/ecresource/trafficfilterresource/schema.go +++ b/ec/ecresource/trafficfilterresource/schema.go @@ -18,98 +18,173 
@@ package trafficfilterresource import ( - "bytes" + "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/cloud-sdk-go/pkg/api" + + "github.com/elastic/terraform-provider-ec/ec/internal" + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" ) -// newSchema returns the schema for an "ec_deployment_traffic_filter" resource. -func newSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Description: "Required name of the ruleset", - Required: true, - }, - "type": { - Type: schema.TypeString, - Description: `Required type of the ruleset ("ip", "vpce" or "azure_private_endpoint")`, - Required: true, - }, - "region": { - Type: schema.TypeString, - Description: "Required filter region, the ruleset can only be attached to deployments in the specific region", - Required: true, - }, - "rule": { - Type: schema.TypeSet, - Set: trafficFilterRuleHash, - Description: "Required list of rules, which the ruleset is made of.", - Required: true, - MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source": { - Type: schema.TypeString, - Description: "Required traffic filter source: IP address, CIDR mask, or VPC endpoint ID, not required when the type is azure_private_endpoint", - Optional: true, - }, - - "description": { - Type: schema.TypeString, - Description: "Optional rule description", - Optional: true, - }, - - "azure_endpoint_name": { - Type: schema.TypeString, - Description: "Optional Azure endpoint name", - Optional: true, - }, - - "azure_endpoint_guid": { - Type: schema.TypeString, - 
Description: "Optional Azure endpoint GUID", - Optional: true, - }, - - "id": { - Type: schema.TypeString, - Description: "Computed rule ID", - Computed: true, - }, +// Ensure provider defined types fully satisfy framework interfaces +var _ resource.Resource = &Resource{} +var _ resource.ResourceWithConfigure = &Resource{} +var _ resource.ResourceWithGetSchema = &Resource{} +var _ resource.ResourceWithImportState = &Resource{} +var _ resource.ResourceWithMetadata = &Resource{} + +func (r *Resource) GetSchema(_ context.Context) (tfsdk.Schema, diag.Diagnostics) { + return tfsdk.Schema{ + Attributes: map[string]tfsdk.Attribute{ + "id": { + Type: types.StringType, + MarkdownDescription: "Unique identifier of this resource.", + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "name": { + Type: types.StringType, + Description: "Required name of the ruleset", + Required: true, + }, + "type": { + Type: types.StringType, + Description: `Required type of the ruleset ("ip", "vpce" or "azure_private_endpoint")`, + Required: true, + }, + "region": { + Type: types.StringType, + Description: "Required filter region, the ruleset can only be attached to deployments in the specific region", + Required: true, + }, + "include_by_default": { + Type: types.BoolType, + Description: "Should the ruleset be automatically included in the new deployments (Defaults to false)", + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: false}), }, }, + "description": { + Type: types.StringType, + Description: "Optional ruleset description", + Optional: true, + }, }, - - "include_by_default": { - Type: schema.TypeBool, - Description: "Should the ruleset be automatically included in the new deployments (Defaults to false)", - Optional: true, - Default: false, + Blocks: map[string]tfsdk.Block{ + "rule": trafficFilterRuleSchema(), }, - "description": { - Type: 
schema.TypeString, - Description: "Optional ruleset description", - Optional: true, + }, nil +} + +func trafficFilterRuleSchema() tfsdk.Block { + return tfsdk.Block{ + Description: "Required set of rules, which the ruleset is made of.", + NestingMode: tfsdk.BlockNestingModeSet, + MinItems: 1, + Attributes: map[string]tfsdk.Attribute{ + "source": { + Type: types.StringType, + Description: "Optional traffic filter source: IP address, CIDR mask, or VPC endpoint ID, not required when the type is azure_private_endpoint", + Optional: true, + }, + "description": { + Type: types.StringType, + Description: "Optional rule description", + Optional: true, + }, + "azure_endpoint_name": { + Type: types.StringType, + Description: "Optional Azure endpoint name", + Optional: true, + }, + "azure_endpoint_guid": { + Type: types.StringType, + Description: "Optional Azure endpoint GUID", + Optional: true, + }, + "id": { + Type: types.StringType, + Description: "Computed rule ID", + Computed: true, + // NOTE: The ID will change on update, so we intentionally do not use plan modifier resource.UseStateForUnknown() here! 
+ }, }, } } -func trafficFilterRuleHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - if m["source"] != nil { - buf.WriteString(m["source"].(string)) - } - if m["description"] != nil { - buf.WriteString(m["description"].(string)) - } - if m["azure_endpoint_name"] != nil { - buf.WriteString(m["azure_endpoint_name"].(string)) - } - if m["azure_endpoint_guid"] != nil { - buf.WriteString(m["azure_endpoint_guid"].(string)) +func trafficFilterRuleSetType() attr.Type { + return trafficFilterRuleSchema().Type() +} + +func trafficFilterRuleElemType() attr.Type { + return trafficFilterRuleSchema().Type().(types.SetType).ElemType +} + +func trafficFilterRuleAttrTypes() map[string]attr.Type { + return trafficFilterRuleSchema().Type().(types.SetType).ElemType.(types.ObjectType).AttrTypes +} + +/* TODO +Timeouts: &schema.ResourceTimeout{ + Default: schema.DefaultTimeout(10 * time.Minute), +}, +*/ + +type Resource struct { + client *api.API +} + +func resourceReady(r Resource, dg *diag.Diagnostics) bool { + if r.client == nil { + dg.AddError( + "Unconfigured API Client", + "Expected configured API client. Please report this issue to the provider developers.", + ) + + return false } - return schema.HashString(buf.String()) + return true +} + +func (r *Resource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("id"), request.ID)...) +} + +func (r *Resource) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) { + client, diags := internal.ConvertProviderData(request.ProviderData) + response.Diagnostics.Append(diags...) 
+ r.client = client +} + +func (r *Resource) Metadata(ctx context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = request.ProviderTypeName + "_deployment_traffic_filter" +} + +type modelV0 struct { + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Type types.String `tfsdk:"type"` + Region types.String `tfsdk:"region"` + Rule types.Set `tfsdk:"rule"` //< trafficFilterRuleModelV0 + IncludeByDefault types.Bool `tfsdk:"include_by_default"` + Description types.String `tfsdk:"description"` +} + +type trafficFilterRuleModelV0 struct { + ID types.String `tfsdk:"id"` + Source types.String `tfsdk:"source"` + Description types.String `tfsdk:"description"` + AzureEndpointName types.String `tfsdk:"azure_endpoint_name"` + AzureEndpointGUID types.String `tfsdk:"azure_endpoint_guid"` } diff --git a/ec/ecresource/trafficfilterresource/schema_test.go b/ec/ecresource/trafficfilterresource/schema_test.go deleted file mode 100644 index f7bb278d5..000000000 --- a/ec/ecresource/trafficfilterresource/schema_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package trafficfilterresource - -import "testing" - -func Test_trafficFilterRuleHash(t *testing.T) { - type args struct { - v interface{} - } - tests := []struct { - name string - args args - want int - }{ - { - name: "hash a rule without description", - args: args{v: map[string]interface{}{ - "source": "8.8.8.8/24", - }}, - want: 1202035824, - }, - { - name: "hash a rule with description", - args: args{v: map[string]interface{}{ - "source": "8.8.8.8/24", - "description": "google dns", - }}, - want: 1579348650, - }, - { - name: "hash a rule different without description", - args: args{v: map[string]interface{}{ - "source": "8.8.4.4/24", - }}, - want: 2058478515, - }, - { - name: "hash a rule different with description", - args: args{v: map[string]interface{}{ - "source": "8.8.4.4/24", - "description": "alternate google dns", - }}, - want: 766352945, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := trafficFilterRuleHash(tt.args.v); got != tt.want { - t.Errorf("trafficFilterRuleHash() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/ec/ecresource/trafficfilterresource/testutils.go b/ec/ecresource/trafficfilterresource/testutils.go index ecbbd2839..90ee59334 100644 --- a/ec/ecresource/trafficfilterresource/testutils.go +++ b/ec/ecresource/trafficfilterresource/testutils.go @@ -17,19 +17,38 @@ package trafficfilterresource -func newSampleTrafficFilter() map[string]interface{} { - return map[string]interface{}{ - "name": "my traffic filter", - "type": "ip", - "include_by_default": false, - "region": "us-east-1", - "rule": []interface{}{ - map[string]interface{}{ - "source": "1.1.1.1", - }, - map[string]interface{}{ - "source": "0.0.0.0/0", +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func newSampleTrafficFilter(id string) modelV0 { + return modelV0{ + ID: types.String{Value: id}, + Name: types.String{Value: "my traffic filter"}, + 
Type: types.String{Value: "ip"}, + IncludeByDefault: types.Bool{Value: false}, + Region: types.String{Value: "us-east-1"}, + Description: types.String{Null: true}, + Rule: types.Set{ + ElemType: trafficFilterRuleElemType(), + Elems: []attr.Value{ + newSampleTrafficFilterRule("1.1.1.1", "", "", "", ""), + newSampleTrafficFilterRule("0.0.0.0/0", "", "", "", ""), }, }, } } + +func newSampleTrafficFilterRule(source string, description string, azureEndpointName string, azureEndpointGUID string, id string) types.Object { + return types.Object{ + AttrTypes: trafficFilterRuleAttrTypes(), + Attrs: map[string]attr.Value{ + "source": types.String{Value: source, Null: source == ""}, + "description": types.String{Value: description, Null: description == ""}, + "azure_endpoint_name": types.String{Value: azureEndpointName, Null: azureEndpointName == ""}, + "azure_endpoint_guid": types.String{Value: azureEndpointGUID, Null: azureEndpointGUID == ""}, + "id": types.String{Value: id}, + }, + } +} diff --git a/ec/ecresource/trafficfilterresource/update.go b/ec/ecresource/trafficfilterresource/update.go index c5b3d87cf..fdbe03cde 100644 --- a/ec/ecresource/trafficfilterresource/update.go +++ b/ec/ecresource/trafficfilterresource/update.go @@ -20,24 +20,49 @@ package trafficfilterresource import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" ) // Update will update an existing deployment traffic filter ruleset -func update(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var client = meta.(*api.API) +func (r Resource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + if !resourceReady(r, &response.Diagnostics) { + return + } + + var newState modelV0 
+ + diags := request.Plan.Get(ctx, &newState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + trafficFilterRulesetRequest, diags := expandModel(ctx, newState) _, err := trafficfilterapi.Update(trafficfilterapi.UpdateParams{ - API: client, ID: d.Id(), - Req: expandModel(d), + API: r.client, ID: newState.ID.Value, + Req: trafficFilterRulesetRequest, }) if err != nil { - return diag.FromErr(err) + response.Diagnostics.AddError(err.Error(), err.Error()) + return + } + + found, diags := r.read(ctx, newState.ID.Value, &newState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + if !found { + // We can't unset the state here, and must make sure to set the state according to the plan below. + // So all we do is add a warning. + diags.AddWarning( + "Failed to read traffic filter rule.", + "Please run terraform refresh to ensure a consistent state.", + ) } - return read(ctx, d, meta) + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) } diff --git a/ec/internal/planmodifier/default_value.go b/ec/internal/planmodifier/default_value.go index 75ad85026..5522279c4 100644 --- a/ec/internal/planmodifier/default_value.go +++ b/ec/internal/planmodifier/default_value.go @@ -47,16 +47,9 @@ func (m *defaultValueAttributePlanModifier) MarkdownDescription(ctx context.Cont } func (m *defaultValueAttributePlanModifier) Modify(_ context.Context, req tfsdk.ModifyAttributePlanRequest, res *tfsdk.ModifyAttributePlanResponse) { - // If the attribute configuration is not null, we are done here if !req.AttributeConfig.IsNull() { return } - // If the attribute plan is "known" and "not null", then a previous plan m in the sequence - // has already been applied, and we don't want to interfere. 
- if !req.AttributePlan.IsUnknown() && !req.AttributePlan.IsNull() { - return - } - res.AttributePlan = m.DefaultValue } diff --git a/ec/provider.go b/ec/provider.go index afffa4176..264697b8f 100644 --- a/ec/provider.go +++ b/ec/provider.go @@ -80,7 +80,6 @@ func LegacyProvider() *schema.Provider { ResourcesMap: map[string]*schema.Resource{ "ec_deployment": deploymentresource.Resource(), "ec_deployment_elasticsearch_keystore": elasticsearchkeystoreresource.Resource(), - "ec_deployment_traffic_filter": trafficfilterresource.Resource(), "ec_deployment_extension": extensionresource.Resource(), }, } @@ -173,6 +172,7 @@ func (p *Provider) DataSources(ctx context.Context) []func() datasource.DataSour func (p *Provider) Resources(ctx context.Context) []func() resource.Resource { return []func() resource.Resource{ + func() resource.Resource { return &trafficfilterresource.Resource{} }, func() resource.Resource { return &trafficfilterassocresource.Resource{} }, } } From 2084e390676c0df4f18c504cdb1738bc793d07bb Mon Sep 17 00:00:00 2001 From: Pascal Hofmann Date: Wed, 21 Sep 2022 17:37:32 +0200 Subject: [PATCH 008/104] Use blocks and ListNestedAttributes for nested schemas in datasources * Use ListNestedAttributes for nested schemas in datasource ec_stack * Use ListNestedAttributes for nested schemas in datasource ec_deployment * Use blocks and ListNestedAttributes for nested schemas in datasource ec_deployments --- docs/data-sources/ec_stack.md | 2 +- ec/acc/datasource_stack_test.go | 2 +- ec/acc/testdata/datasource_stack_regex.tf | 2 +- .../deploymentdatasource/flatteners_apm.go | 4 +- .../flatteners_apm_test.go | 8 +- .../flatteners_elasticsearch.go | 6 +- .../flatteners_elasticsearch_test.go | 8 +- .../flatteners_enterprise_search.go | 4 +- .../flatteners_enterprise_search_test.go | 8 +- .../flatteners_integrations_server.go | 4 +- .../flatteners_integrations_server_test.go | 8 +- .../deploymentdatasource/flatteners_kibana.go | 4 +- .../flatteners_kibana_test.go | 8 +- 
.../deploymentdatasource/schema.go | 54 ++--- .../deploymentdatasource/schema_apm.go | 106 +++++++--- .../schema_elasticsearch.go | 192 +++++++++++++---- .../schema_enterprise_search.go | 124 ++++++++--- .../schema_integrations_server.go | 106 +++++++--- .../deploymentdatasource/schema_kibana.go | 106 +++++++--- .../schema_observability.go | 38 +++- .../deploymentsdatasource/expanders_test.go | 36 ++-- .../deploymentsdatasource/schema.go | 195 +++++++++++++----- ec/ecdatasource/stackdatasource/datasource.go | 10 +- .../stackdatasource/datasource_test.go | 22 +- .../stackdatasource/flatteners_apm.go | 2 +- .../stackdatasource/flatteners_apm_test.go | 2 - .../flatteners_elasticsearch.go | 6 +- .../flatteners_elasticsearch_test.go | 6 +- .../flatteners_enterprise_search.go | 2 +- .../flatteners_enterprise_search_test.go | 2 - .../stackdatasource/flatteners_kibana.go | 2 +- .../stackdatasource/flatteners_kibana_test.go | 2 - ec/ecdatasource/stackdatasource/schema.go | 140 ++++++++++--- go.mod | 4 +- go.sum | 25 +-- 35 files changed, 877 insertions(+), 373 deletions(-) diff --git a/docs/data-sources/ec_stack.md b/docs/data-sources/ec_stack.md index 076e4cadf..480f585e1 100644 --- a/docs/data-sources/ec_stack.md +++ b/docs/data-sources/ec_stack.md @@ -4,7 +4,7 @@ description: |- Retrieves information about an Elastic Cloud stack. --- -# Data Source: ec_deployment +# Data Source: ec_stack Use this data source to retrieve information about an existing Elastic Cloud stack. 
diff --git a/ec/acc/datasource_stack_test.go b/ec/acc/datasource_stack_test.go index 6e9c0e11b..382f3d759 100644 --- a/ec/acc/datasource_stack_test.go +++ b/ec/acc/datasource_stack_test.go @@ -60,7 +60,7 @@ func TestAccDatasourceStack_regex(t *testing.T) { Config: cfg, PreventDiskCleanup: true, Check: checkDataSourceStack(datasourceName, - resource.TestCheckResourceAttr(datasourceName, "version_regex", "7.0.?"), + resource.TestCheckResourceAttr(datasourceName, "version_regex", "8.4.?"), resource.TestCheckResourceAttr(datasourceName, "region", getRegion()), ), }, diff --git a/ec/acc/testdata/datasource_stack_regex.tf b/ec/acc/testdata/datasource_stack_regex.tf index 4f5a7624e..126b6e9a0 100644 --- a/ec/acc/testdata/datasource_stack_regex.tf +++ b/ec/acc/testdata/datasource_stack_regex.tf @@ -1,4 +1,4 @@ data "ec_stack" "regex" { - version_regex = "7.0.?" + version_regex = "8.4.?" region = "%s" } \ No newline at end of file diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_apm.go b/ec/ecdatasource/deploymentdatasource/flatteners_apm.go index cb5c8c1df..54152e366 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_apm.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_apm.go @@ -33,10 +33,10 @@ import ( // flattened form. 
func flattenApmResources(ctx context.Context, in []*models.ApmResourceInfo, target interface{}) diag.Diagnostics { var diags diag.Diagnostics - var result = make([]apmResourceModelV0, 0, len(in)) + var result = make([]apmResourceInfoModelV0, 0, len(in)) for _, res := range in { - model := apmResourceModelV0{ + model := apmResourceInfoModelV0{ Topology: types.List{ElemType: types.ObjectType{AttrTypes: apmTopologyAttrTypes()}}, } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go index 74d4ad6e6..8fa88c230 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go @@ -37,12 +37,12 @@ func Test_flattenApmResource(t *testing.T) { tests := []struct { name string args args - want []apmResourceModelV0 + want []apmResourceInfoModelV0 }{ { name: "empty resource list returns empty list", args: args{in: []*models.ApmResourceInfo{}}, - want: []apmResourceModelV0{}, + want: []apmResourceInfoModelV0{}, }, { name: "parses the apm resource", @@ -89,7 +89,7 @@ func Test_flattenApmResource(t *testing.T) { }, }, }}, - want: []apmResourceModelV0{{ + want: []apmResourceInfoModelV0{{ ElasticsearchClusterRefID: types.String{Value: "main-elasticsearch"}, RefID: types.String{Value: "main-apm"}, ResourceID: types.String{Value: mock.ValidClusterID}, @@ -118,7 +118,7 @@ func Test_flattenApmResource(t *testing.T) { diags := flattenApmResources(context.Background(), tt.args.in, &newState.Apm) assert.Empty(t, diags) - var got []apmResourceModelV0 + var got []apmResourceInfoModelV0 newState.Apm.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go index a53a55115..27792bc08 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go +++ 
b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go @@ -36,10 +36,10 @@ import ( // flattened form. func flattenElasticsearchResources(ctx context.Context, in []*models.ElasticsearchResourceInfo, target interface{}) diag.Diagnostics { var diags diag.Diagnostics - var result = make([]elasticsearchResourceModelV0, 0, len(in)) + var result = make([]elasticsearchResourceInfoModelV0, 0, len(in)) for _, res := range in { - model := elasticsearchResourceModelV0{ + model := elasticsearchResourceInfoModelV0{ Topology: types.List{ElemType: types.ObjectType{AttrTypes: elasticsearchTopologyAttrTypes()}}, } @@ -168,7 +168,7 @@ func flattenElasticsearchTopology(ctx context.Context, plan *models.Elasticsearc } if !empty { - diags.Append(tfsdk.ValueFrom(ctx, []elasticsearchAutoscalingModel{autoscaling}, elasticsearchAutoscalingSchema(), &model.Autoscaling)...) + diags.Append(tfsdk.ValueFrom(ctx, []elasticsearchAutoscalingModel{autoscaling}, elasticsearchAutoscalingListType(), &model.Autoscaling)...) 
} result = append(result, model) diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go index cc82267b8..a3ff87b83 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go @@ -37,13 +37,13 @@ func Test_flattenElasticsearchResources(t *testing.T) { tests := []struct { name string args args - want []elasticsearchResourceModelV0 + want []elasticsearchResourceInfoModelV0 err string }{ { name: "empty resource list returns empty list", args: args{in: []*models.ElasticsearchResourceInfo{}}, - want: []elasticsearchResourceModelV0{}, + want: []elasticsearchResourceInfoModelV0{}, }, { name: "parses elasticsearch resource", @@ -121,7 +121,7 @@ func Test_flattenElasticsearchResources(t *testing.T) { }, }, }}, - want: []elasticsearchResourceModelV0{{ + want: []elasticsearchResourceInfoModelV0{{ Autoscale: types.String{Value: "true"}, RefID: types.String{Value: "main-elasticsearch"}, ResourceID: types.String{Value: mock.ValidClusterID}, @@ -168,7 +168,7 @@ func Test_flattenElasticsearchResources(t *testing.T) { diags := flattenElasticsearchResources(context.Background(), tt.args.in, &model.Elasticsearch) assert.Empty(t, diags) - var got []elasticsearchResourceModelV0 + var got []elasticsearchResourceInfoModelV0 model.Elasticsearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go index 57236deca..34519abb6 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go @@ -33,10 +33,10 @@ import ( // flattened form. 
func flattenEnterpriseSearchResources(ctx context.Context, in []*models.EnterpriseSearchResourceInfo, target interface{}) diag.Diagnostics { var diags diag.Diagnostics - var result = make([]enterpriseSearchResourceModelV0, 0, len(in)) + var result = make([]enterpriseSearchResourceInfoModelV0, 0, len(in)) for _, res := range in { - model := enterpriseSearchResourceModelV0{ + model := enterpriseSearchResourceInfoModelV0{ Topology: types.List{ElemType: types.ObjectType{AttrTypes: enterpriseSearchTopologyAttrTypes()}}, } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go index ee52d1917..928054ecb 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go @@ -37,12 +37,12 @@ func Test_flattenEnterpriseSearchResource(t *testing.T) { tests := []struct { name string args args - want []enterpriseSearchResourceModelV0 + want []enterpriseSearchResourceInfoModelV0 }{ { name: "empty resource list returns empty list", args: args{in: []*models.EnterpriseSearchResourceInfo{}}, - want: []enterpriseSearchResourceModelV0{}, + want: []enterpriseSearchResourceInfoModelV0{}, }, { name: "parses the enterprisesearch resource", @@ -100,7 +100,7 @@ func Test_flattenEnterpriseSearchResource(t *testing.T) { }, }, }}, - want: []enterpriseSearchResourceModelV0{{ + want: []enterpriseSearchResourceInfoModelV0{{ ElasticsearchClusterRefID: types.String{Value: "main-elasticsearch"}, RefID: types.String{Value: "main-enterprise_search"}, ResourceID: types.String{Value: mock.ValidClusterID}, @@ -133,7 +133,7 @@ func Test_flattenEnterpriseSearchResource(t *testing.T) { diags := flattenEnterpriseSearchResources(context.Background(), tt.args.in, &model.EnterpriseSearch) assert.Empty(t, diags) - var got []enterpriseSearchResourceModelV0 + var got []enterpriseSearchResourceInfoModelV0 
model.EnterpriseSearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go index c66a9a829..7b1ab6c0f 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go @@ -33,10 +33,10 @@ import ( // flattened form. func flattenIntegrationsServerResources(ctx context.Context, in []*models.IntegrationsServerResourceInfo, target interface{}) diag.Diagnostics { var diags diag.Diagnostics - var result = make([]integrationsServerResourceModelV0, 0, len(in)) + var result = make([]integrationsServerResourceInfoModelV0, 0, len(in)) for _, res := range in { - model := integrationsServerResourceModelV0{ + model := integrationsServerResourceInfoModelV0{ Topology: types.List{ElemType: types.ObjectType{AttrTypes: integrationsServerTopologyAttrTypes()}}, } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go index 59afa71dd..eee01a0e1 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go @@ -37,12 +37,12 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { tests := []struct { name string args args - want []integrationsServerResourceModelV0 + want []integrationsServerResourceInfoModelV0 }{ { name: "empty resource list returns empty list", args: args{in: []*models.IntegrationsServerResourceInfo{}}, - want: []integrationsServerResourceModelV0{}, + want: []integrationsServerResourceInfoModelV0{}, }, { name: "parses the integrations_server resource", @@ -91,7 +91,7 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { }, }, }}, - want: 
[]integrationsServerResourceModelV0{{ + want: []integrationsServerResourceInfoModelV0{{ ElasticsearchClusterRefID: types.String{Value: "main-elasticsearch"}, RefID: types.String{Value: "main-integrations_server"}, ResourceID: types.String{Value: mock.ValidClusterID}, @@ -120,7 +120,7 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { diags := flattenIntegrationsServerResources(context.Background(), tt.args.in, &newState.IntegrationsServer) assert.Empty(t, diags) - var got []integrationsServerResourceModelV0 + var got []integrationsServerResourceInfoModelV0 newState.IntegrationsServer.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go b/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go index 1c7bcc46f..b5a427cc6 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go @@ -33,10 +33,10 @@ import ( // flattened form. 
func flattenKibanaResources(ctx context.Context, in []*models.KibanaResourceInfo, target interface{}) diag.Diagnostics { var diags diag.Diagnostics - var result = make([]kibanaResourceModelV0, 0, len(in)) + var result = make([]kibanaResourceInfoModelV0, 0, len(in)) for _, res := range in { - model := kibanaResourceModelV0{ + model := kibanaResourceInfoModelV0{ Topology: types.List{ElemType: types.ObjectType{AttrTypes: kibanaTopologyAttrTypes()}}, } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go index 97b52cc24..3602e9cb7 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go @@ -37,12 +37,12 @@ func Test_flattenKibanaResources(t *testing.T) { tests := []struct { name string args args - want []kibanaResourceModelV0 + want []kibanaResourceInfoModelV0 }{ { name: "empty resource list returns empty list", args: args{in: []*models.KibanaResourceInfo{}}, - want: []kibanaResourceModelV0{}, + want: []kibanaResourceInfoModelV0{}, }, { name: "parses the kibana resource", @@ -91,7 +91,7 @@ func Test_flattenKibanaResources(t *testing.T) { }, }, }}, - want: []kibanaResourceModelV0{{ + want: []kibanaResourceInfoModelV0{{ ElasticsearchClusterRefID: types.String{Value: "main-elasticsearch"}, RefID: types.String{Value: "main-kibana"}, ResourceID: types.String{Value: mock.ValidClusterID}, @@ -119,7 +119,7 @@ func Test_flattenKibanaResources(t *testing.T) { var model modelV0 diags := flattenKibanaResources(context.Background(), tt.args.in, &model.Kibana) assert.Empty(t, diags) - var got []kibanaResourceModelV0 + var got []kibanaResourceInfoModelV0 model.Kibana.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) diff --git a/ec/ecdatasource/deploymentdatasource/schema.go b/ec/ecdatasource/deploymentdatasource/schema.go index f77b0f1b4..3d5f8732b 100644 --- 
a/ec/ecdatasource/deploymentdatasource/schema.go +++ b/ec/ecdatasource/deploymentdatasource/schema.go @@ -29,40 +29,46 @@ func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnost return tfsdk.Schema{ Attributes: map[string]tfsdk.Attribute{ "alias": { - Type: types.StringType, - Computed: true, + Type: types.StringType, + Description: "Deployment alias.", + Computed: true, }, "healthy": { - Type: types.BoolType, - Computed: true, + Type: types.BoolType, + Description: "Overall health status of the deployment.", + Computed: true, }, "id": { - Type: types.StringType, - Required: true, + Type: types.StringType, + Description: "The unique ID of the deployment.", + Required: true, }, "name": { - Type: types.StringType, - Computed: true, + Type: types.StringType, + Description: "The name of the deployment.", + Computed: true, }, "region": { - Type: types.StringType, - Computed: true, + Type: types.StringType, + Description: "Region where the deployment can be found.", + Computed: true, }, "deployment_template_id": { - Type: types.StringType, - Computed: true, + Type: types.StringType, + Description: "ID of the deployment template used to create the deployment.", + Computed: true, }, "traffic_filter": { - Type: types.ListType{ElemType: types.StringType}, - Computed: true, + Type: types.ListType{ElemType: types.StringType}, + Description: "Traffic filter block, which contains a list of traffic filter rule identifiers.", + Computed: true, }, - "observability": observabilitySettingsSchema(), "tags": { - Type: types.MapType{ElemType: types.StringType}, - Computed: true, + Type: types.MapType{ElemType: types.StringType}, + Description: "Key value map of arbitrary string tags.", + Computed: true, }, - - // Deployment resources + "observability": observabilitySettingsSchema(), "elasticsearch": elasticsearchResourceInfoSchema(), "kibana": kibanaResourceInfoSchema(), "apm": apmResourceInfoSchema(), @@ -82,9 +88,9 @@ type modelV0 struct { TrafficFilter 
types.List `tfsdk:"traffic_filter"` //< string Observability types.List `tfsdk:"observability"` //< observabilitySettingsModel Tags types.Map `tfsdk:"tags"` //< string - Elasticsearch types.List `tfsdk:"elasticsearch"` //< elasticsearchResourceModelV0 - Kibana types.List `tfsdk:"kibana"` //< kibanaResourceModelV0 - Apm types.List `tfsdk:"apm"` //< apmResourceModelV0 - IntegrationsServer types.List `tfsdk:"integrations_server"` //< integrationsServerResourceModelV0 - EnterpriseSearch types.List `tfsdk:"enterprise_search"` //< enterpriseSearchResourceModelV0 + Elasticsearch types.List `tfsdk:"elasticsearch"` //< elasticsearchResourceInfoModelV0 + Kibana types.List `tfsdk:"kibana"` //< kibanaResourceInfoModelV0 + Apm types.List `tfsdk:"apm"` //< apmResourceInfoModelV0 + IntegrationsServer types.List `tfsdk:"integrations_server"` //< integrationsServerResourceInfoModelV0 + EnterpriseSearch types.List `tfsdk:"enterprise_search"` //< enterpriseSearchResourceInfoModelV0 } diff --git a/ec/ecdatasource/deploymentdatasource/schema_apm.go b/ec/ecdatasource/deploymentdatasource/schema_apm.go index 2030c6500..9fda9fdf7 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_apm.go +++ b/ec/ecdatasource/deploymentdatasource/schema_apm.go @@ -18,50 +18,102 @@ package deploymentdatasource import ( + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" ) func apmResourceInfoSchema() tfsdk.Attribute { - // TODO should we use tfsdk.ListNestedAttributes here? 
- see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 return tfsdk.Attribute{ - Computed: true, - Type: types.ListType{ElemType: types.ObjectType{ - AttrTypes: apmResourceInfoAttrTypes(), - }}, + Description: "Instance configuration of the APM type.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + Description: "The user-specified ID of the Elasticsearch cluster to which this resource kind will link.", + Computed: true, + }, + "healthy": { + Type: types.BoolType, + Description: "Resource kind health status.", + Computed: true, + }, + "http_endpoint": { + Type: types.StringType, + Description: "HTTP endpoint for the resource kind.", + Computed: true, + }, + "https_endpoint": { + Type: types.StringType, + Description: "HTTPS endpoint for the resource kind.", + Computed: true, + }, + "ref_id": { + Type: types.StringType, + Description: "User specified ref_id for the resource kind.", + Computed: true, + }, + "resource_id": { + Type: types.StringType, + Description: "The resource unique identifier.", + Computed: true, + }, + "status": { + Type: types.StringType, + Description: "Resource kind status (for example, \"started\", \"stopped\", etc).", + Computed: true, + }, + "version": { + Type: types.StringType, + Description: "Elastic stack version.", + Computed: true, + }, + "topology": apmTopologySchema(), + }), } } func apmResourceInfoAttrTypes() map[string]attr.Type { - return map[string]attr.Type{ - "elasticsearch_cluster_ref_id": types.StringType, - "healthy": types.BoolType, - "http_endpoint": types.StringType, - "https_endpoint": types.StringType, - "ref_id": types.StringType, - "resource_id": types.StringType, - "status": types.StringType, - "version": types.StringType, - "topology": 
apmTopologySchema(), - } + return apmResourceInfoSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes } -func apmTopologySchema() attr.Type { - return types.ListType{ElemType: types.ObjectType{ - AttrTypes: apmTopologyAttrTypes(), - }} + +func apmTopologySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Node topology element definition.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Description: "Controls the allocation of this topology element as well as allowed sizes and node_types. It needs to match the ID of an existing instance configuration.", + Computed: true, + }, + "size": { + Type: types.StringType, + Description: "Amount of resource per topology element in the \"g\" notation.", + Computed: true, + }, + "size_resource": { + Type: types.StringType, + Description: "Type of resource (\"memory\" or \"storage\")", + Computed: true, + }, + "zone_count": { + Type: types.Int64Type, + Description: "Number of zones in which nodes will be placed.", + Computed: true, + }, + }), + } } func apmTopologyAttrTypes() map[string]attr.Type { - return map[string]attr.Type{ - "instance_configuration_id": types.StringType, - "size": types.StringType, - "size_resource": types.StringType, - "zone_count": types.Int64Type, - } + return apmTopologySchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes } -type apmResourceModelV0 struct { +type apmResourceInfoModelV0 struct { ElasticsearchClusterRefID types.String `tfsdk:"elasticsearch_cluster_ref_id"` Healthy types.Bool `tfsdk:"healthy"` HttpEndpoint types.String `tfsdk:"http_endpoint"` diff --git a/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go b/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go index 17d24e00a..38eacacb1 100644 --- 
a/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go +++ b/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go @@ -18,74 +18,178 @@ package deploymentdatasource import ( + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" ) func elasticsearchResourceInfoSchema() tfsdk.Attribute { - // TODO should we use tfsdk.ListNestedAttributes here? - see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 return tfsdk.Attribute{ - Computed: true, - Type: types.ListType{ElemType: types.ObjectType{ - AttrTypes: elasticsearchResourceInfoAttrTypes(), - }}, + Description: "Instance configuration of the Elasticsearch resource kind.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "autoscale": { + Type: types.StringType, + Description: "Whether or not Elasticsearch autoscaling is enabled.", + Computed: true, + }, + "healthy": { + Type: types.BoolType, + Description: "Resource kind health status.", + Computed: true, + }, + "cloud_id": { + Type: types.StringType, + Description: "The encoded Elasticsearch credentials to use in Beats or Logstash.", + MarkdownDescription: "The encoded Elasticsearch credentials to use in Beats or Logstash. 
See [Configure Beats and Logstash with Cloud ID](https://www.elastic.co/guide/en/cloud/current/ec-cloud-id.html) for more information.", + Computed: true, + }, + "http_endpoint": { + Type: types.StringType, + Description: "HTTP endpoint for the resource kind.", + Computed: true, + }, + "https_endpoint": { + Type: types.StringType, + Description: "HTTPS endpoint for the resource kind.", + Computed: true, + }, + "ref_id": { + Type: types.StringType, + Description: "User specified ref_id for the resource kind.", + Computed: true, + }, + "resource_id": { + Type: types.StringType, + Description: "The resource unique identifier.", + Computed: true, + }, + "status": { + Type: types.StringType, + Description: "Resource kind status (for example, \"started\", \"stopped\", etc).", + Computed: true, + }, + "version": { + Type: types.StringType, + Description: "Elastic stack version.", + Computed: true, + }, + "topology": elasticsearchTopologySchema(), + }), } } func elasticsearchResourceInfoAttrTypes() map[string]attr.Type { - return map[string]attr.Type{ - "autoscale": types.StringType, - "healthy": types.BoolType, - "cloud_id": types.StringType, - "http_endpoint": types.StringType, - "https_endpoint": types.StringType, - "ref_id": types.StringType, - "resource_id": types.StringType, - "status": types.StringType, - "version": types.StringType, - "topology": elasticsearchTopologySchema(), - } + return elasticsearchResourceInfoSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes } -func elasticsearchTopologySchema() attr.Type { - return types.ListType{ElemType: types.ObjectType{ - AttrTypes: elasticsearchTopologyAttrTypes(), - }} +func elasticsearchTopologySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Node topology element definition.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + 
Type: types.StringType, + Description: "Controls the allocation of this topology element as well as allowed sizes and node_types. It needs to match the ID of an existing instance configuration.", + Computed: true, + }, + "size": { + Type: types.StringType, + Description: "Amount of resource per topology element in the \"g\" notation.", + Computed: true, + }, + "size_resource": { + Type: types.StringType, + Description: "Type of resource (\"memory\" or \"storage\")", + Computed: true, + }, + "zone_count": { + Type: types.Int64Type, + Description: "Number of zones in which nodes will be placed.", + Computed: true, + }, + "node_type_data": { + Type: types.BoolType, + Description: "Defines whether this node can hold data (<7.10.0).", + Computed: true, + }, + "node_type_master": { + Type: types.BoolType, + Description: " Defines whether this node can be elected master (<7.10.0).", + Computed: true, + }, + "node_type_ingest": { + Type: types.BoolType, + Description: "Defines whether this node can run an ingest pipeline (<7.10.0).", + Computed: true, + }, + "node_type_ml": { + Type: types.BoolType, + Description: "Defines whether this node can run ML jobs (<7.10.0).", + Computed: true, + }, + "node_roles": { + Type: types.SetType{ElemType: types.StringType}, + Description: "Defines the list of Elasticsearch node roles assigned to the topology element (>=7.10.0).", + Computed: true, + }, + "autoscaling": elasticsearchAutoscalingSchema(), + }), + } } func elasticsearchTopologyAttrTypes() map[string]attr.Type { - return map[string]attr.Type{ - "instance_configuration_id": types.StringType, - "size": types.StringType, - "size_resource": types.StringType, - "zone_count": types.Int64Type, - "node_type_data": types.BoolType, - "node_type_master": types.BoolType, - "node_type_ingest": types.BoolType, - "node_type_ml": types.BoolType, - "node_roles": types.SetType{ElemType: types.StringType}, - "autoscaling": elasticsearchAutoscalingSchema(), // Optional Elasticsearch autoscaling 
settings, such a maximum and minimum size and resources. + return elasticsearchTopologySchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} + +func elasticsearchAutoscalingSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "max_size_resource": { + Type: types.StringType, + Description: "Maximum resource type for the maximum autoscaling setting.", + Computed: true, + }, + "max_size": { + Type: types.StringType, + Description: "Maximum size value for the maximum autoscaling setting.", + Computed: true, + }, + "min_size_resource": { + Type: types.StringType, + Description: "Minimum resource type for the minimum autoscaling setting.", + Computed: true, + }, + "min_size": { + Type: types.StringType, + Description: "Minimum size value for the minimum autoscaling setting.", + Computed: true, + }, + "policy_override_json": { + Type: types.StringType, + Description: "Computed policy overrides set directly via the API or other clients.", + Computed: true, + }, + }), } } -func elasticsearchAutoscalingSchema() attr.Type { - return types.ListType{ElemType: types.ObjectType{ - AttrTypes: elasticsearchAutoscalingAttrTypes(), - }} +func elasticsearchAutoscalingListType() attr.Type { + return elasticsearchAutoscalingSchema().Attributes.Type() } func elasticsearchAutoscalingAttrTypes() map[string]attr.Type { - return map[string]attr.Type{ - "max_size_resource": types.StringType, // Maximum resource type for the maximum autoscaling setting. - "max_size": types.StringType, // Maximum size value for the maximum autoscaling setting. - "min_size_resource": types.StringType, // Minimum resource type for the minimum autoscaling setting. 
- "min_size": types.StringType, // Minimum size value for the minimum autoscaling setting. - "policy_override_json": types.StringType, // Computed policy overrides set directly via the API or other clients. - } + return elasticsearchAutoscalingListType().(types.ListType).ElemType.(types.ObjectType).AttrTypes + } -type elasticsearchResourceModelV0 struct { +type elasticsearchResourceInfoModelV0 struct { Autoscale types.String `tfsdk:"autoscale"` Healthy types.Bool `tfsdk:"healthy"` CloudID types.String `tfsdk:"cloud_id"` diff --git a/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go b/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go index b47fe0684..3f1b76e76 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go +++ b/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go @@ -18,53 +18,117 @@ package deploymentdatasource import ( + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" ) func enterpriseSearchResourceInfoSchema() tfsdk.Attribute { - // TODO should we use tfsdk.ListNestedAttributes here? 
- see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 return tfsdk.Attribute{ - Computed: true, - Type: types.ListType{ElemType: types.ObjectType{ - AttrTypes: enterpriseSearchResourceInfoAttrTypes(), - }}, + Description: "Instance configuration of the Enterprise Search type.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + Description: "The user-specified ID of the Elasticsearch cluster to which this resource kind will link.", + Computed: true, + }, + "healthy": { + Type: types.BoolType, + Description: "Resource kind health status.", + Computed: true, + }, + "http_endpoint": { + Type: types.StringType, + Description: "HTTP endpoint for the resource kind.", + Computed: true, + }, + "https_endpoint": { + Type: types.StringType, + Description: "HTTPS endpoint for the resource kind.", + Computed: true, + }, + "ref_id": { + Type: types.StringType, + Description: "User specified ref_id for the resource kind.", + Computed: true, + }, + "resource_id": { + Type: types.StringType, + Description: "The resource unique identifier.", + Computed: true, + }, + "status": { + Type: types.StringType, + Description: "Resource kind status (for example, \"started\", \"stopped\", etc).", + Computed: true, + }, + "version": { + Type: types.StringType, + Description: "Elastic stack version.", + Computed: true, + }, + "topology": enterpriseSearchTopologySchema(), + }), } } func enterpriseSearchResourceInfoAttrTypes() map[string]attr.Type { - return map[string]attr.Type{ - "elasticsearch_cluster_ref_id": types.StringType, - "healthy": types.BoolType, - "http_endpoint": types.StringType, - "https_endpoint": types.StringType, - "ref_id": types.StringType, - "resource_id": types.StringType, - "status": types.StringType, - 
"version": types.StringType, - "topology": enterpriseSearchTopologySchema(), - } + return enterpriseSearchResourceInfoSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes } -func enterpriseSearchTopologySchema() attr.Type { - return types.ListType{ElemType: types.ObjectType{ - AttrTypes: enterpriseSearchTopologyAttrTypes(), - }} + +func enterpriseSearchTopologySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Node topology element definition.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Description: "Controls the allocation of this topology element as well as allowed sizes and node_types. It needs to match the ID of an existing instance configuration.", + Computed: true, + }, + "size": { + Type: types.StringType, + Description: "Amount of resource per topology element in the \"g\" notation.", + Computed: true, + }, + "size_resource": { + Type: types.StringType, + Description: "Type of resource (\"memory\" or \"storage\")", + Computed: true, + }, + "zone_count": { + Type: types.Int64Type, + Description: "Number of zones in which nodes will be placed.", + Computed: true, + }, + "node_type_appserver": { + Type: types.BoolType, + Description: "Defines whether this instance should run as application/API server.", + Computed: true, + }, + "node_type_connector": { + Type: types.BoolType, + Description: "Defines whether this instance should run as connector.", + Computed: true, + }, + "node_type_worker": { + Type: types.BoolType, + Description: "Defines whether this instance should run as background worker.", + Computed: true, + }, + }), + } } func enterpriseSearchTopologyAttrTypes() map[string]attr.Type { - return map[string]attr.Type{ - "instance_configuration_id": types.StringType, - "size": types.StringType, - "size_resource": types.StringType, - 
"zone_count": types.Int64Type, - "node_type_appserver": types.BoolType, - "node_type_connector": types.BoolType, - "node_type_worker": types.BoolType, - } + return enterpriseSearchTopologySchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes } -type enterpriseSearchResourceModelV0 struct { +type enterpriseSearchResourceInfoModelV0 struct { ElasticsearchClusterRefID types.String `tfsdk:"elasticsearch_cluster_ref_id"` Healthy types.Bool `tfsdk:"healthy"` HttpEndpoint types.String `tfsdk:"http_endpoint"` diff --git a/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go b/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go index f650cc60a..3fbbbd683 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go +++ b/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go @@ -18,50 +18,102 @@ package deploymentdatasource import ( + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" ) func integrationsServerResourceInfoSchema() tfsdk.Attribute { - // TODO should we use tfsdk.ListNestedAttributes here? 
- see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 return tfsdk.Attribute{ - Computed: true, - Type: types.ListType{ElemType: types.ObjectType{ - AttrTypes: integrationsServerResourceInfoAttrTypes(), - }}, + Description: "Instance configuration of the Integrations Server type.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + Description: "The user-specified ID of the Elasticsearch cluster to which this resource kind will link.", + Computed: true, + }, + "healthy": { + Type: types.BoolType, + Description: "Resource kind health status.", + Computed: true, + }, + "http_endpoint": { + Type: types.StringType, + Description: "HTTP endpoint for the resource kind.", + Computed: true, + }, + "https_endpoint": { + Type: types.StringType, + Description: "HTTPS endpoint for the resource kind.", + Computed: true, + }, + "ref_id": { + Type: types.StringType, + Description: "User specified ref_id for the resource kind.", + Computed: true, + }, + "resource_id": { + Type: types.StringType, + Description: "The resource unique identifier.", + Computed: true, + }, + "status": { + Type: types.StringType, + Description: "Resource kind status (for example, \"started\", \"stopped\", etc).", + Computed: true, + }, + "version": { + Type: types.StringType, + Description: "Elastic stack version.", + Computed: true, + }, + "topology": integrationsServerTopologySchema(), + }), } } func integrationsServerResourceInfoAttrTypes() map[string]attr.Type { - return map[string]attr.Type{ - "elasticsearch_cluster_ref_id": types.StringType, - "healthy": types.BoolType, - "http_endpoint": types.StringType, - "https_endpoint": types.StringType, - "ref_id": types.StringType, - "resource_id": types.StringType, - "status": types.StringType, 
- "version": types.StringType, - "topology": integrationsServerTopologySchema(), - } + return integrationsServerResourceInfoSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes } -func integrationsServerTopologySchema() attr.Type { - return types.ListType{ElemType: types.ObjectType{ - AttrTypes: integrationsServerTopologyAttrTypes(), - }} + +func integrationsServerTopologySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Node topology element definition.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Description: "Controls the allocation of this topology element as well as allowed sizes and node_types. It needs to match the ID of an existing instance configuration.", + Computed: true, + }, + "size": { + Type: types.StringType, + Description: "Amount of resource per topology element in the \"g\" notation.", + Computed: true, + }, + "size_resource": { + Type: types.StringType, + Description: "Type of resource (\"memory\" or \"storage\")", + Computed: true, + }, + "zone_count": { + Type: types.Int64Type, + Description: "Number of zones in which nodes will be placed.", + Computed: true, + }, + }), + } } func integrationsServerTopologyAttrTypes() map[string]attr.Type { - return map[string]attr.Type{ - "instance_configuration_id": types.StringType, - "size": types.StringType, - "size_resource": types.StringType, - "zone_count": types.Int64Type, - } + return integrationsServerTopologySchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes } -type integrationsServerResourceModelV0 struct { +type integrationsServerResourceInfoModelV0 struct { ElasticsearchClusterRefID types.String `tfsdk:"elasticsearch_cluster_ref_id"` Healthy types.Bool `tfsdk:"healthy"` HttpEndpoint types.String `tfsdk:"http_endpoint"` diff --git 
a/ec/ecdatasource/deploymentdatasource/schema_kibana.go b/ec/ecdatasource/deploymentdatasource/schema_kibana.go index 3b425ddf3..db37223bb 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_kibana.go +++ b/ec/ecdatasource/deploymentdatasource/schema_kibana.go @@ -18,50 +18,102 @@ package deploymentdatasource import ( + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" ) func kibanaResourceInfoSchema() tfsdk.Attribute { - // TODO should we use tfsdk.ListNestedAttributes here? - see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 return tfsdk.Attribute{ - Computed: true, - Type: types.ListType{ElemType: types.ObjectType{ - AttrTypes: kibanaResourceInfoAttrTypes(), - }}, + Description: "Instance configuration of the Kibana type.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + Description: "The user-specified ID of the Elasticsearch cluster to which this resource kind will link.", + Computed: true, + }, + "healthy": { + Type: types.BoolType, + Description: "Resource kind health status.", + Computed: true, + }, + "http_endpoint": { + Type: types.StringType, + Description: "HTTP endpoint for the resource kind.", + Computed: true, + }, + "https_endpoint": { + Type: types.StringType, + Description: "HTTPS endpoint for the resource kind.", + Computed: true, + }, + "ref_id": { + Type: types.StringType, + Description: "User specified ref_id for the resource kind.", + Computed: true, + }, + "resource_id": { + Type: types.StringType, + Description: "The resource unique identifier.", + Computed: true, + }, + "status": 
{ + Type: types.StringType, + Description: "Resource kind status (for example, \"started\", \"stopped\", etc).", + Computed: true, + }, + "version": { + Type: types.StringType, + Description: "Elastic stack version.", + Computed: true, + }, + "topology": kibanaTopologySchema(), + }), } } func kibanaResourceInfoAttrTypes() map[string]attr.Type { - return map[string]attr.Type{ - "elasticsearch_cluster_ref_id": types.StringType, - "healthy": types.BoolType, - "http_endpoint": types.StringType, - "https_endpoint": types.StringType, - "ref_id": types.StringType, - "resource_id": types.StringType, - "status": types.StringType, - "version": types.StringType, - "topology": kibanaTopologySchema(), - } + return kibanaResourceInfoSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes } -func kibanaTopologySchema() attr.Type { - return types.ListType{ElemType: types.ObjectType{ - AttrTypes: kibanaTopologyAttrTypes(), - }} + +func kibanaTopologySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Node topology element definition.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Description: "Controls the allocation of this topology element as well as allowed sizes and node_types. 
It needs to match the ID of an existing instance configuration.", + Computed: true, + }, + "size": { + Type: types.StringType, + Description: "Amount of resource per topology element in the \"g\" notation.", + Computed: true, + }, + "size_resource": { + Type: types.StringType, + Description: "Type of resource (\"memory\" or \"storage\")", + Computed: true, + }, + "zone_count": { + Type: types.Int64Type, + Description: "Number of zones in which nodes will be placed.", + Computed: true, + }, + }), + } } func kibanaTopologyAttrTypes() map[string]attr.Type { - return map[string]attr.Type{ - "instance_configuration_id": types.StringType, - "size": types.StringType, - "size_resource": types.StringType, - "zone_count": types.Int64Type, - } + return kibanaTopologySchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes } -type kibanaResourceModelV0 struct { +type kibanaResourceInfoModelV0 struct { ElasticsearchClusterRefID types.String `tfsdk:"elasticsearch_cluster_ref_id"` Healthy types.Bool `tfsdk:"healthy"` HttpEndpoint types.String `tfsdk:"http_endpoint"` diff --git a/ec/ecdatasource/deploymentdatasource/schema_observability.go b/ec/ecdatasource/deploymentdatasource/schema_observability.go index 485447989..c1ba83216 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_observability.go +++ b/ec/ecdatasource/deploymentdatasource/schema_observability.go @@ -18,28 +18,44 @@ package deploymentdatasource import ( + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" ) func observabilitySettingsSchema() tfsdk.Attribute { - // TODO should we use tfsdk.ListNestedAttributes here? 
- see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 return tfsdk.Attribute{ - Computed: true, - Type: types.ListType{ElemType: types.ObjectType{ - AttrTypes: observabilitySettingsAttrTypes(), - }}, + Description: "Observability settings. Information about logs and metrics shipped to a dedicated deployment.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "deployment_id": { + Type: types.StringType, + Description: "Destination deployment ID for the shipped logs and monitoring metrics.", + Computed: true, + }, + "ref_id": { + Type: types.StringType, + Description: "Elasticsearch resource kind ref_id of the destination deployment.", + Computed: true, + }, + "logs": { + Type: types.BoolType, + Description: "Defines whether logs are enabled or disabled.", + Computed: true, + }, + "metrics": { + Type: types.BoolType, + Description: "Defines whether metrics are enabled or disabled.", + Computed: true, + }, + }), } } func observabilitySettingsAttrTypes() map[string]attr.Type { - return map[string]attr.Type{ - "deployment_id": types.StringType, - "ref_id": types.StringType, - "logs": types.BoolType, - "metrics": types.BoolType, - } + return observabilitySettingsSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes } type observabilitySettingsModel struct { diff --git a/ec/ecdatasource/deploymentsdatasource/expanders_test.go b/ec/ecdatasource/deploymentsdatasource/expanders_test.go index 8ddb20da5..8e1219ab5 100644 --- a/ec/ecdatasource/deploymentsdatasource/expanders_test.go +++ b/ec/ecdatasource/deploymentsdatasource/expanders_test.go @@ -72,9 +72,9 @@ func Test_expandFilters(t *testing.T) { Size: types.Int64{Value: 200}, Tags: util.StringMapAsType(map[string]string{"foo": "bar"}), Elasticsearch: types.List{ - ElemType: 
types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(Elasticsearch)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(), + AttrTypes: resourceFiltersAttrTypes(Elasticsearch), Attrs: map[string]attr.Value{ "healthy": types.String{Null: true}, "status": types.String{Null: true}, @@ -83,9 +83,9 @@ func Test_expandFilters(t *testing.T) { }}, }, Kibana: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(Kibana)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(), + AttrTypes: resourceFiltersAttrTypes(Kibana), Attrs: map[string]attr.Value{ "healthy": types.String{Null: true}, "status": types.String{Value: "started"}, @@ -94,9 +94,9 @@ func Test_expandFilters(t *testing.T) { }}, }, Apm: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(Apm)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(), + AttrTypes: resourceFiltersAttrTypes(Apm), Attrs: map[string]attr.Value{ "healthy": types.String{Value: "true"}, "status": types.String{Null: true}, @@ -105,9 +105,9 @@ func Test_expandFilters(t *testing.T) { }}, }, EnterpriseSearch: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(EnterpriseSearch)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(), + AttrTypes: resourceFiltersAttrTypes(EnterpriseSearch), Attrs: map[string]attr.Value{ "status": types.String{Null: true}, "healthy": types.String{Value: "false"}, @@ -167,9 +167,9 @@ func newInvalidFilters() modelV0 { return modelV0{ Healthy: types.String{Value: "invalid value"}, Apm: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + ElemType: 
types.ObjectType{AttrTypes: resourceFiltersAttrTypes(Apm)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(), + AttrTypes: resourceFiltersAttrTypes(Apm), Attrs: map[string]attr.Value{ "healthy": types.String{Value: "invalid value"}, }, @@ -187,9 +187,9 @@ func newSampleFilters() modelV0 { "foo": types.String{Value: "bar"}, }}, Elasticsearch: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(Elasticsearch)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(), + AttrTypes: resourceFiltersAttrTypes(Elasticsearch), Attrs: map[string]attr.Value{ "healthy": types.String{Null: true}, "status": types.String{Null: true}, @@ -198,9 +198,9 @@ func newSampleFilters() modelV0 { }}, }, Kibana: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(Kibana)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(), + AttrTypes: resourceFiltersAttrTypes(Kibana), Attrs: map[string]attr.Value{ "healthy": types.String{Null: true}, "status": types.String{Value: "started"}, @@ -209,9 +209,9 @@ func newSampleFilters() modelV0 { }}, }, Apm: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(Apm)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(), + AttrTypes: resourceFiltersAttrTypes(Apm), Attrs: map[string]attr.Value{ "healthy": types.String{Value: "true"}, "status": types.String{Null: true}, @@ -220,9 +220,9 @@ func newSampleFilters() modelV0 { }}, }, EnterpriseSearch: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes()}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(EnterpriseSearch)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(), + AttrTypes: 
resourceFiltersAttrTypes(EnterpriseSearch), Attrs: map[string]attr.Value{ "status": types.String{Null: true}, "healthy": types.String{Value: "false"}, diff --git a/ec/ecdatasource/deploymentsdatasource/schema.go b/ec/ecdatasource/deploymentsdatasource/schema.go index c633e0f98..cd6bbd122 100644 --- a/ec/ecdatasource/deploymentsdatasource/schema.go +++ b/ec/ecdatasource/deploymentsdatasource/schema.go @@ -19,6 +19,7 @@ package deploymentsdatasource import ( "context" + "fmt" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" @@ -28,28 +29,44 @@ import ( "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" ) +type ResourceKind int + +const ( + Apm ResourceKind = iota + Elasticsearch + EnterpriseSearch + IntegrationsServer + Kibana +) + func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { return tfsdk.Schema{ Attributes: map[string]tfsdk.Attribute{ "name_prefix": { - Type: types.StringType, - Optional: true, + Type: types.StringType, + Description: "Prefix that one or several deployment names have in common.", + Optional: true, }, "healthy": { - Type: types.StringType, - Optional: true, + Type: types.StringType, + Description: "Overall health status of the deployment.", + Optional: true, }, "deployment_template_id": { - Type: types.StringType, - Optional: true, + Type: types.StringType, + Description: "ID of the deployment template used to create the deployment.", + Optional: true, }, "tags": { - Type: types.MapType{ElemType: types.StringType}, - Optional: true, + Type: types.MapType{ElemType: types.StringType}, + Description: "Key value map of arbitrary string tags for the deployment.\n", + Optional: true, }, "size": { - Type: types.Int64Type, - Optional: true, + Type: types.Int64Type, + Description: "The maximum number of deployments to return. Defaults to 100.", + MarkdownDescription: "The maximum number of deployments to return. 
Defaults to `100`.", + Optional: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.Int64{Value: 100}), }, @@ -62,67 +79,145 @@ func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnost MarkdownDescription: "Unique identifier of this data source.", }, "return_count": { - Type: types.Int64Type, - Computed: true, + Type: types.Int64Type, + Description: "The number of deployments actually returned.", + Computed: true, }, "deployments": deploymentsListSchema(), - + }, + Blocks: map[string]tfsdk.Block{ // Deployment resources - "elasticsearch": resourceFiltersSchema(), - "kibana": resourceFiltersSchema(), - "apm": resourceFiltersSchema(), - "integrations_server": resourceFiltersSchema(), - "enterprise_search": resourceFiltersSchema(), + "elasticsearch": resourceFiltersSchema(Elasticsearch), + "kibana": resourceFiltersSchema(Kibana), + "apm": resourceFiltersSchema(Apm), + "integrations_server": resourceFiltersSchema(IntegrationsServer), + "enterprise_search": resourceFiltersSchema(EnterpriseSearch), }, }, nil } func deploymentsListSchema() tfsdk.Attribute { - // TODO should we use tfsdk.ListNestedAttributes here? 
- see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 return tfsdk.Attribute{ - Computed: true, - Type: types.ListType{ElemType: types.ObjectType{ - AttrTypes: deploymentAttrTypes(), - }}, + Description: "List of deployments which match the specified query.", + Computed: true, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "deployment_id": { + Type: types.StringType, + Description: "The deployment unique ID.", + Computed: true, + }, + "name": { + Type: types.StringType, + Description: "The name of the deployment.", + Computed: true, + }, + "alias": { + Type: types.StringType, + Description: "Deployment alias.", + Computed: true, + }, + "elasticsearch_resource_id": { + Type: types.StringType, + Description: "The Elasticsearch resource unique ID.", + Computed: true, + }, + "elasticsearch_ref_id": { + Type: types.StringType, + Description: "The Elasticsearch resource reference.", + Computed: true, + }, + "kibana_resource_id": { + Type: types.StringType, + Description: "The Kibana resource unique ID.", + Computed: true, + }, + "kibana_ref_id": { + Type: types.StringType, + Description: "The Kibana resource reference.", + Computed: true, + }, + "apm_resource_id": { + Type: types.StringType, + Description: "The APM resource unique ID.", + Computed: true, + }, + "apm_ref_id": { + Type: types.StringType, + Description: "The APM resource reference.", + Computed: true, + }, + "integrations_server_resource_id": { + Type: types.StringType, + Description: "The Integrations Server resource unique ID.", + Computed: true, + }, + "integrations_server_ref_id": { + Type: types.StringType, + Description: "The Integrations Server resource reference.", + Computed: true, + }, + "enterprise_search_resource_id": { + Type: types.StringType, + Description: "The Enterprise Search resource unique ID.", + Computed: true, + }, + "enterprise_search_ref_id": { + Type: 
types.StringType, + Description: "The Enterprise Search resource reference.", + Computed: true, + }, + }), } } func deploymentAttrTypes() map[string]attr.Type { - return map[string]attr.Type{ - "deployment_id": types.StringType, - "name": types.StringType, - "alias": types.StringType, - "elasticsearch_resource_id": types.StringType, - "elasticsearch_ref_id": types.StringType, - "kibana_resource_id": types.StringType, - "kibana_ref_id": types.StringType, - "apm_resource_id": types.StringType, - "apm_ref_id": types.StringType, - "integrations_server_resource_id": types.StringType, - "integrations_server_ref_id": types.StringType, - "enterprise_search_resource_id": types.StringType, - "enterprise_search_ref_id": types.StringType, - } + return deploymentsListSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes + } -func resourceFiltersSchema() tfsdk.Attribute { - // TODO should we use tfsdk.ListNestedAttributes here? - see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 - return tfsdk.Attribute{ - Optional: true, - Type: types.ListType{ElemType: types.ObjectType{ - AttrTypes: resourceFiltersAttrTypes(), - }}, +func (rk ResourceKind) Name() string { + switch rk { + case Apm: + return "APM" + case Elasticsearch: + return "Elasticsearch" + case EnterpriseSearch: + return "Enterprise Search" + case IntegrationsServer: + return "Integrations Server" + case Kibana: + return "Kibana" + default: + return "unknown" } } -func resourceFiltersAttrTypes() map[string]attr.Type { - return map[string]attr.Type{ - "healthy": types.StringType, - "status": types.StringType, - "version": types.StringType, +func resourceFiltersSchema(resourceKind ResourceKind) tfsdk.Block { + return tfsdk.Block{ + Description: fmt.Sprintf("Filter by %s resource kind status or configuration.", resourceKind.Name()), + NestingMode: tfsdk.BlockNestingModeList, + Attributes: 
map[string]tfsdk.Attribute{ + "healthy": { + Type: types.StringType, + Optional: true, + }, + "status": { + Type: types.StringType, + Optional: true, + }, + "version": { + Type: types.StringType, + Optional: true, + }, + }, } } +func resourceFiltersAttrTypes(resourceKind ResourceKind) map[string]attr.Type { + return resourceFiltersSchema(resourceKind).Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes + +} + type modelV0 struct { ID types.String `tfsdk:"id"` NamePrefix types.String `tfsdk:"name_prefix"` diff --git a/ec/ecdatasource/stackdatasource/datasource.go b/ec/ecdatasource/stackdatasource/datasource.go index 22a312ac3..aa693ff84 100644 --- a/ec/ecdatasource/stackdatasource/datasource.go +++ b/ec/ecdatasource/stackdatasource/datasource.go @@ -155,11 +155,17 @@ func stackFromFilters(expr, version string, locked bool, stacks []*models.StackV ) } -func newResourceKindConfigModelV0() resourceKindConfigModelV0 { - return resourceKindConfigModelV0{ +func newElasticsearchConfigModelV0() elasticSearchConfigModelV0 { + return elasticSearchConfigModelV0{ DenyList: types.List{ElemType: types.StringType}, CompatibleNodeTypes: types.List{ElemType: types.StringType}, Plugins: types.List{ElemType: types.StringType}, DefaultPlugins: types.List{ElemType: types.StringType}, } } +func newResourceKindConfigModelV0() resourceKindConfigModelV0 { + return resourceKindConfigModelV0{ + DenyList: types.List{ElemType: types.StringType}, + CompatibleNodeTypes: types.List{ElemType: types.StringType}, + } +} diff --git a/ec/ecdatasource/stackdatasource/datasource_test.go b/ec/ecdatasource/stackdatasource/datasource_test.go index 23755534e..44e885ce9 100644 --- a/ec/ecdatasource/stackdatasource/datasource_test.go +++ b/ec/ecdatasource/stackdatasource/datasource_test.go @@ -135,10 +135,10 @@ func newSampleStack() modelV0 { MinUpgradableFrom: types.String{Value: "6.8.0"}, Elasticsearch: types.List{ ElemType: types.ObjectType{ - AttrTypes: resourceKindConfigAttrTypes(), + AttrTypes: 
elasticSearchConfigAttrTypes(), }, Elems: []attr.Value{types.Object{ - AttrTypes: resourceKindConfigAttrTypes(), + AttrTypes: elasticSearchConfigAttrTypes(), Attrs: map[string]attr.Value{ "denylist": util.StringListAsType([]string{"some"}), "capacity_constraints_max": types.Int64{Value: 8192}, @@ -166,52 +166,46 @@ func newSampleStack() modelV0 { }, Kibana: types.List{ ElemType: types.ObjectType{ - AttrTypes: resourceKindConfigAttrTypes(), + AttrTypes: resourceKindConfigAttrTypes(Kibana), }, Elems: []attr.Value{types.Object{ - AttrTypes: resourceKindConfigAttrTypes(), + AttrTypes: resourceKindConfigAttrTypes(Kibana), Attrs: map[string]attr.Value{ "denylist": util.StringListAsType([]string{"some"}), "capacity_constraints_max": types.Int64{Value: 8192}, "capacity_constraints_min": types.Int64{Value: 512}, "compatible_node_types": util.StringListAsType(nil), "docker_image": types.String{Value: "docker.elastic.co/cloud-assets/kibana:7.9.1-0"}, - "plugins": util.StringListAsType(nil), - "default_plugins": util.StringListAsType(nil), }, }}, }, EnterpriseSearch: types.List{ ElemType: types.ObjectType{ - AttrTypes: resourceKindConfigAttrTypes(), + AttrTypes: resourceKindConfigAttrTypes(EnterpriseSearch), }, Elems: []attr.Value{types.Object{ - AttrTypes: resourceKindConfigAttrTypes(), + AttrTypes: resourceKindConfigAttrTypes(EnterpriseSearch), Attrs: map[string]attr.Value{ "denylist": util.StringListAsType([]string{"some"}), "capacity_constraints_max": types.Int64{Value: 8192}, "capacity_constraints_min": types.Int64{Value: 512}, "compatible_node_types": util.StringListAsType(nil), "docker_image": types.String{Value: "docker.elastic.co/cloud-assets/enterprise_search:7.9.1-0"}, - "plugins": util.StringListAsType(nil), - "default_plugins": util.StringListAsType(nil), }, }}, }, Apm: types.List{ ElemType: types.ObjectType{ - AttrTypes: resourceKindConfigAttrTypes(), + AttrTypes: resourceKindConfigAttrTypes(Apm), }, Elems: []attr.Value{types.Object{ - AttrTypes: 
resourceKindConfigAttrTypes(), + AttrTypes: resourceKindConfigAttrTypes(Apm), Attrs: map[string]attr.Value{ "denylist": util.StringListAsType([]string{"some"}), "capacity_constraints_max": types.Int64{Value: 8192}, "capacity_constraints_min": types.Int64{Value: 512}, "compatible_node_types": util.StringListAsType(nil), "docker_image": types.String{Value: "docker.elastic.co/cloud-assets/apm:7.9.1-0"}, - "plugins": util.StringListAsType(nil), - "default_plugins": util.StringListAsType(nil), }, }}, }, diff --git a/ec/ecdatasource/stackdatasource/flatteners_apm.go b/ec/ecdatasource/stackdatasource/flatteners_apm.go index 194017cbf..ce03799d9 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_apm.go +++ b/ec/ecdatasource/stackdatasource/flatteners_apm.go @@ -64,7 +64,7 @@ func flattenStackVersionApmConfig(ctx context.Context, res *models.StackVersionA diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, types.ListType{ ElemType: types.ObjectType{ - AttrTypes: resourceKindConfigAttrTypes(), + AttrTypes: resourceKindConfigAttrTypes(Apm), }, }, target)...) 
diff --git a/ec/ecdatasource/stackdatasource/flatteners_apm_test.go b/ec/ecdatasource/stackdatasource/flatteners_apm_test.go index b55ae794a..627f90fc8 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_apm_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_apm_test.go @@ -65,8 +65,6 @@ func Test_flattenApmResource(t *testing.T) { CapacityConstraintsMin: types.Int64{Value: 512}, CompatibleNodeTypes: util.StringListAsType(nil), DockerImage: types.String{Value: "docker.elastic.co/cloud-assets/apm:7.9.1-0"}, - Plugins: util.StringListAsType(nil), - DefaultPlugins: util.StringListAsType(nil), }}, }, } diff --git a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go index 15026a434..2c3105d28 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go +++ b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go @@ -30,7 +30,7 @@ import ( // flattenStackVersionElasticsearchConfig takes a StackVersionElasticsearchConfig and flattens it. func flattenStackVersionElasticsearchConfig(ctx context.Context, res *models.StackVersionElasticsearchConfig, target interface{}) diag.Diagnostics { var diags diag.Diagnostics - model := newResourceKindConfigModelV0() + model := newElasticsearchConfigModelV0() empty := true if res == nil { @@ -72,9 +72,9 @@ func flattenStackVersionElasticsearchConfig(ctx context.Context, res *models.Sta return diags } - diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, types.ListType{ + diags.Append(tfsdk.ValueFrom(ctx, []elasticSearchConfigModelV0{model}, types.ListType{ ElemType: types.ObjectType{ - AttrTypes: resourceKindConfigAttrTypes(), + AttrTypes: elasticSearchConfigAttrTypes(), }, }, target)...) 
diff --git a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go index 75a6b95f5..f4bd77d19 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go @@ -37,7 +37,7 @@ func Test_flattenElasticsearchResource(t *testing.T) { tests := []struct { name string args args - want []resourceKindConfigModelV0 + want []elasticSearchConfigModelV0 }{ { name: "empty resource list returns empty list", @@ -75,7 +75,7 @@ func Test_flattenElasticsearchResource(t *testing.T) { "repository-gcs", }, }}, - want: []resourceKindConfigModelV0{{ + want: []elasticSearchConfigModelV0{{ DenyList: util.StringListAsType([]string{"some"}), CapacityConstraintsMax: types.Int64{Value: 8192}, CapacityConstraintsMin: types.Int64{Value: 512}, @@ -106,7 +106,7 @@ func Test_flattenElasticsearchResource(t *testing.T) { diags := flattenStackVersionElasticsearchConfig(context.Background(), tt.args.res, &newState.Elasticsearch) assert.Empty(t, diags) - var got []resourceKindConfigModelV0 + var got []elasticSearchConfigModelV0 newState.Elasticsearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) diff --git a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go index 614ad8e17..0db37b9f1 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go +++ b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go @@ -64,7 +64,7 @@ func flattenStackVersionEnterpriseSearchConfig(ctx context.Context, res *models. diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, types.ListType{ ElemType: types.ObjectType{ - AttrTypes: resourceKindConfigAttrTypes(), + AttrTypes: resourceKindConfigAttrTypes(EnterpriseSearch), }, }, target)...) 
diff --git a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go index 426ee8b5e..dd39145cb 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go @@ -65,8 +65,6 @@ func Test_flattenEnterpriseSearchResources(t *testing.T) { CapacityConstraintsMin: types.Int64{Value: 512}, CompatibleNodeTypes: util.StringListAsType(nil), DockerImage: types.String{Value: "docker.elastic.co/cloud-assets/enterprise_search:7.9.1-0"}, - Plugins: util.StringListAsType(nil), - DefaultPlugins: util.StringListAsType(nil), }}, }, } diff --git a/ec/ecdatasource/stackdatasource/flatteners_kibana.go b/ec/ecdatasource/stackdatasource/flatteners_kibana.go index 31060b7f8..d7eca89db 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_kibana.go +++ b/ec/ecdatasource/stackdatasource/flatteners_kibana.go @@ -64,7 +64,7 @@ func flattenStackVersionKibanaConfig(ctx context.Context, res *models.StackVersi diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, types.ListType{ ElemType: types.ObjectType{ - AttrTypes: resourceKindConfigAttrTypes(), + AttrTypes: resourceKindConfigAttrTypes(Kibana), }, }, target)...) 
diff --git a/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go b/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go index 78083a172..513199bc9 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go @@ -65,8 +65,6 @@ func Test_flattenKibanaResources(t *testing.T) { CapacityConstraintsMin: types.Int64{Value: 512}, CompatibleNodeTypes: util.StringListAsType(nil), DockerImage: types.String{Value: "docker.elastic.co/cloud-assets/kibana:7.9.1-0"}, - Plugins: util.StringListAsType(nil), - DefaultPlugins: util.StringListAsType(nil), }}, }, } diff --git a/ec/ecdatasource/stackdatasource/schema.go b/ec/ecdatasource/stackdatasource/schema.go index 9be8f45d9..b9e5773a8 100644 --- a/ec/ecdatasource/stackdatasource/schema.go +++ b/ec/ecdatasource/stackdatasource/schema.go @@ -19,13 +19,23 @@ package stackdatasource import ( "context" + "fmt" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" ) +type ResourceKind int + +const ( + Apm ResourceKind = iota + EnterpriseSearch + Kibana +) + func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { return tfsdk.Schema{ Attributes: map[string]tfsdk.Attribute{ @@ -68,41 +78,113 @@ func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnost Type: types.BoolType, Computed: true, }, - - "apm": kindResourceSchema(), - "enterprise_search": kindResourceSchema(), - "elasticsearch": kindResourceSchema(), - "kibana": kindResourceSchema(), + "apm": resourceKindConfigSchema(Apm), + "enterprise_search": resourceKindConfigSchema(EnterpriseSearch), + "elasticsearch": elasticSearchConfigSchema(), + "kibana": resourceKindConfigSchema(Kibana), }, }, nil } -func 
kindResourceSchema() tfsdk.Attribute { - // TODO should we use tfsdk.ListNestedAttributes here? - see https://github.com/hashicorp/terraform-provider-hashicups-pf/blob/8f222d805d39445673e442a674168349a45bc054/hashicups/data_source_coffee.go#L22 +func elasticSearchConfigSchema() tfsdk.Attribute { return tfsdk.Attribute{ - Computed: true, - Type: types.ListType{ElemType: types.ObjectType{ - AttrTypes: resourceKindConfigAttrTypes(), - }}, + Description: "Information for Elasticsearch workloads on this stack version.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "denylist": { + Type: types.ListType{ElemType: types.StringType}, + Description: "List of configuration options that cannot be overridden by user settings.", + Computed: true, + }, + "capacity_constraints_max": { + Type: types.Int64Type, + Description: "Maximum size of the instances.", + Computed: true, + }, + "capacity_constraints_min": { + Type: types.Int64Type, + Description: "Minimum size of the instances.", + Computed: true, + }, + "compatible_node_types": { + Type: types.ListType{ElemType: types.StringType}, + Description: "List of node types compatible with this one.", + Computed: true, + }, + "docker_image": { + Type: types.StringType, + Description: "Docker image to use for the Elasticsearch cluster instances.", + Computed: true, + }, + "plugins": { + Type: types.ListType{ElemType: types.StringType}, + Description: "List of available plugins to be specified by users in Elasticsearch cluster instances.", + Computed: true, + }, + "default_plugins": { + Type: types.ListType{ElemType: types.StringType}, + Description: "List of default plugins.", + Computed: true, + }, + // node_types not added. It is highly unlikely they will be used + // for anything, and if they're needed in the future, then we can + // invest on adding them.
+ }), } } -func resourceKindConfigAttrTypes() map[string]attr.Type { - return map[string]attr.Type{ - "denylist": types.ListType{ElemType: types.StringType}, - "capacity_constraints_max": types.Int64Type, - "capacity_constraints_min": types.Int64Type, - "compatible_node_types": types.ListType{ElemType: types.StringType}, - "docker_image": types.StringType, - "plugins": types.ListType{ElemType: types.StringType}, - "default_plugins": types.ListType{ElemType: types.StringType}, +func elasticSearchConfigAttrTypes() map[string]attr.Type { + return elasticSearchConfigSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} - // node_types not added. It is highly unlikely they will be used - // for anything, and if they're needed in the future, then we can - // invest on adding them. +func resourceKindConfigSchema(resourceKind ResourceKind) tfsdk.Attribute { + var names = map[ResourceKind]string{ + Apm: "APM", + EnterpriseSearch: "Enterprise Search", + Kibana: "Kibana", + } + return tfsdk.Attribute{ + Description: fmt.Sprintf("Information for %s workloads on this stack version.", names[resourceKind]), + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "denylist": { + Type: types.ListType{ElemType: types.StringType}, + Description: "List of configuration options that cannot be overridden by user settings.", + Computed: true, + }, + "capacity_constraints_max": { + Type: types.Int64Type, + Description: "Maximum size of the instances.", + Computed: true, + }, + "capacity_constraints_min": { + Type: types.Int64Type, + Description: "Minimum size of the instances.", + Computed: true, + }, + "compatible_node_types": { + Type: types.ListType{ElemType: types.StringType}, + Description: "List of node types compatible with this one.", + Computed: true, + }, + "docker_image": { + Type: types.StringType, + Description: fmt.Sprintf("Docker image to
use for the %s instance.", names[resourceKind]), + Computed: true, + }, + // node_types not added. It is highly unlikely they will be used + // for anything, and if they're needed in the future, then we can + // invest on adding them. + }), } } +func resourceKindConfigAttrTypes(resourceKind ResourceKind) map[string]attr.Type { + return resourceKindConfigSchema(resourceKind).Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} + type modelV0 struct { ID types.String `tfsdk:"id"` VersionRegex types.String `tfsdk:"version_regex"` @@ -115,11 +197,11 @@ type modelV0 struct { AllowListed types.Bool `tfsdk:"allowlisted"` Apm types.List `tfsdk:"apm"` //< resourceKindConfigModelV0 EnterpriseSearch types.List `tfsdk:"enterprise_search"` //< resourceKindConfigModelV0 - Elasticsearch types.List `tfsdk:"elasticsearch"` //< resourceKindConfigModelV0 + Elasticsearch types.List `tfsdk:"elasticsearch"` //< elasticSearchConfigModelV0 Kibana types.List `tfsdk:"kibana"` //< resourceKindConfigModelV0 } -type resourceKindConfigModelV0 struct { +type elasticSearchConfigModelV0 struct { DenyList types.List `tfsdk:"denylist"` CapacityConstraintsMax types.Int64 `tfsdk:"capacity_constraints_max"` CapacityConstraintsMin types.Int64 `tfsdk:"capacity_constraints_min"` @@ -128,3 +210,11 @@ type resourceKindConfigModelV0 struct { Plugins types.List `tfsdk:"plugins"` DefaultPlugins types.List `tfsdk:"default_plugins"` } + +type resourceKindConfigModelV0 struct { + DenyList types.List `tfsdk:"denylist"` + CapacityConstraintsMax types.Int64 `tfsdk:"capacity_constraints_max"` + CapacityConstraintsMin types.Int64 `tfsdk:"capacity_constraints_min"` + CompatibleNodeTypes types.List `tfsdk:"compatible_node_types"` + DockerImage types.String `tfsdk:"docker_image"` +} diff --git a/go.mod b/go.mod index 3097048ac..7f8123e5b 100644 --- a/go.mod +++ b/go.mod @@ -3,14 +3,13 @@ module github.com/elastic/terraform-provider-ec go 1.19 require ( - github.com/blang/semver v3.5.1+incompatible 
github.com/blang/semver/v4 v4.0.0 github.com/elastic/cloud-sdk-go v1.10.0 github.com/go-openapi/runtime v0.24.1 github.com/go-openapi/strfmt v0.21.3 github.com/hashicorp/terraform-plugin-framework v0.12.0 + github.com/hashicorp/terraform-plugin-framework-validators v0.5.0 github.com/hashicorp/terraform-plugin-go v0.14.0 - github.com/hashicorp/terraform-plugin-log v0.7.0 github.com/hashicorp/terraform-plugin-mux v0.7.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.22.0 github.com/stretchr/testify v1.8.0 @@ -48,6 +47,7 @@ require ( github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.17.3 // indirect github.com/hashicorp/terraform-json v0.14.0 // indirect + github.com/hashicorp/terraform-plugin-log v0.7.0 // indirect github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c // indirect github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect github.com/hashicorp/yamux v0.1.1 // indirect diff --git a/go.sum b/go.sum index 005fd4fa8..670974d02 100644 --- a/go.sum +++ b/go.sum @@ -18,7 +18,6 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYU github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= -github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= @@ -32,8 +31,6 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:W 
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -219,7 +216,6 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -252,30 +248,24 @@ github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mO github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hc-install v0.4.0 h1:cZkRFr1WVa0Ty6x5fTvL1TuO1flul231rWkGH92oYYk= github.com/hashicorp/hc-install v0.4.0/go.mod h1:5d155H8EC5ewegao9A4PUTMNPZaq+TbOzkJJZ4vrXeI= -github.com/hashicorp/hcl/v2 v2.13.0 h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc= -github.com/hashicorp/hcl/v2 v2.13.0/go.mod 
h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= github.com/hashicorp/hcl/v2 v2.14.0 h1:jX6+Q38Ly9zaAJlAjnFVyeNSNCKKW8D0wvyg7vij5Wc= github.com/hashicorp/hcl/v2 v2.14.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.17.2 h1:EU7i3Fh7vDUI9nNRdMATCEfnm9axzTnad8zszYZ73Go= -github.com/hashicorp/terraform-exec v0.17.2/go.mod h1:tuIbsL2l4MlwwIZx9HPM+LOV9vVyEfBYu2GsO1uH3/8= github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjlaclkx3eErU= github.com/hashicorp/terraform-exec v0.17.3/go.mod h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI= github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= -github.com/hashicorp/terraform-plugin-framework v0.11.1 h1:rq8f+TLDO4tJu+n9mMYlDrcRoIdrg0gTUvV2Jr0Ya24= -github.com/hashicorp/terraform-plugin-framework v0.11.1/go.mod h1:GENReHOz6GEt8Jk3UN94vk8BdC6irEHFgN3Z9HPhPUU= github.com/hashicorp/terraform-plugin-framework v0.12.0 h1:Bk3l5MQUaZoo5eplr+u1FomYqGS564e8Tp3rutnCfYg= github.com/hashicorp/terraform-plugin-framework v0.12.0/go.mod h1:wcZdk4+Uef6Ng+BiBJjGAcIPlIs5bhlEV/TA1k6Xkq8= +github.com/hashicorp/terraform-plugin-framework-validators v0.5.0 h1:eD79idhnJOBajkUMEbm0c8dOyOb/F49STbUEVojT6F4= +github.com/hashicorp/terraform-plugin-framework-validators v0.5.0/go.mod h1:NfGgclDM3FZqvNVppPKE2aHI1JAyT002ypPRya7ch3I= github.com/hashicorp/terraform-plugin-go v0.14.0 h1:ttnSlS8bz3ZPYbMb84DpcPhY4F5DsQtcAS7cHo8uvP4= github.com/hashicorp/terraform-plugin-go v0.14.0/go.mod h1:2nNCBeRLaenyQEi78xrGrs9hMbulveqG/zDMQSvVJTE= github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs= github.com/hashicorp/terraform-plugin-log v0.7.0/go.mod 
h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= github.com/hashicorp/terraform-plugin-mux v0.7.0 h1:wRbSYzg+v2sn5Mdee0UKm4YTt4wJG0LfSwtgNuBkglY= github.com/hashicorp/terraform-plugin-mux v0.7.0/go.mod h1:Ae30Mc5lz4d1awtiCbHP0YyvgBeiQ00Q1nAq0U3lb+I= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.21.0 h1:eIJjFlI4k6BMso6Wq/bq56U0RukXc4JbwJJ8Oze2/tg= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.21.0/go.mod h1:mYPs/uchNcBq7AclQv9QUtSf9iNcfp1Ag21jqTlDf2M= github.com/hashicorp/terraform-plugin-sdk/v2 v2.22.0 h1:MzfNfrheTt24xbEbA4npUSbX3GYu4xjXS7czcpJFyQY= github.com/hashicorp/terraform-plugin-sdk/v2 v2.22.0/go.mod h1:q1XKSxXg9nDmhV0IvNZNZxe3gcTAHzMqrjs8wX1acng= github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c h1:D8aRO6+mTqHfLsK/BC3j5OAoogv1WLRWzY1AaTo3rBg= @@ -437,8 +427,6 @@ go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4x go.mongodb.org/mongo-driver v1.8.2/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.10.1 h1:NujsPveKwHaWuKUer/ceo9DzEe7HIj1SlJ6uvXZG0S4= -go.mongodb.org/mongo-driver v1.10.1/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8= go.mongodb.org/mongo-driver v1.10.2 h1:4Wk3cnqOrQCn0P92L3/mmurMxzdvWWs5J9jinAVKD+k= go.mongodb.org/mongo-driver v1.10.2/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -455,12 +443,9 @@ golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto 
v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5g+dZMEWqcz5Czj/GWYbkM= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 h1:tnebWN09GYg9OLPss1KXj8txwZc6X6uMr6VFdcGNbHw= -golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/exp v0.0.0-20220914170420-dc92f8653013 h1:ZjglnWxEUdPyXl4o/j4T89SRCI+4X6NW6185PNLEOF4= golang.org/x/exp v0.0.0-20220914170420-dc92f8653013/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -488,8 +473,6 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b h1:ZmngSVLe/wycRns9MKikG9OWIEjGcGAkacif7oYQaUY= -golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI= golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -529,8 
+512,6 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 h1:v6hYoSR9T5oet+pMXwUWkbiVqx/63mlHjefrHmxwfeY= -golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220913175220-63ea55921009 h1:PuvuRMeLWqsf/ZdT1UUZz0syhioyv1mzuFZsXs4fvhw= golang.org/x/sys v0.0.0-20220913175220-63ea55921009/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -560,8 +541,6 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf h1:Q5xNKbTSFwkuaaGaR7CMcXEM5sy19KYdUU8iF8/iRC0= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= google.golang.org/genproto v0.0.0-20220914210030-581e60b4ef85 h1:lkYqfLZL9+9C+SltHOTeOHL6uueWYYkGp5NoeOZQsis= google.golang.org/genproto v0.0.0-20220914210030-581e60b4ef85/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= From 993d47d8a8e2315605066efe306cbdb0f1001b76 Mon Sep 17 00:00:00 2001 From: Pascal Hofmann Date: Fri, 23 Sep 2022 10:53:48 +0200 Subject: [PATCH 009/104] Migrate 
resource ec_deployment_elasticsearch_keystore to terraform-plugin-framework (#10) Co-authored-by: Pascal Hofmann --- .../deployment_elasticsearch_kesytore_test.go | 66 ++++ .../elasticsearchkeystoreresource/create.go | 52 ++- .../elasticsearchkeystoreresource/delete.go | 38 ++- .../expanders.go | 11 +- .../expanders_test.go | 44 +-- .../elasticsearchkeystoreresource/read.go | 74 +++-- .../read_test.go | 121 ------- .../elasticsearchkeystoreresource/resource.go | 41 --- .../resource_test.go | 302 ++++++++++++++++++ .../elasticsearchkeystoreresource/schema.go | 118 +++++-- .../testutils.go | 36 --- .../elasticsearchkeystoreresource/update.go | 47 ++- ec/provider.go | 8 +- 13 files changed, 631 insertions(+), 327 deletions(-) delete mode 100644 ec/ecresource/elasticsearchkeystoreresource/read_test.go delete mode 100644 ec/ecresource/elasticsearchkeystoreresource/resource.go create mode 100644 ec/ecresource/elasticsearchkeystoreresource/resource_test.go delete mode 100644 ec/ecresource/elasticsearchkeystoreresource/testutils.go diff --git a/ec/acc/deployment_elasticsearch_kesytore_test.go b/ec/acc/deployment_elasticsearch_kesytore_test.go index aafcc0eee..d888f7a5e 100644 --- a/ec/acc/deployment_elasticsearch_kesytore_test.go +++ b/ec/acc/deployment_elasticsearch_kesytore_test.go @@ -118,6 +118,72 @@ func TestAccDeploymentElasticsearchKeystore_full(t *testing.T) { }) } +func TestAccDeploymentElasticsearchKeystore_UpgradeFrom0_4_1(t *testing.T) { + resType := "ec_deployment_elasticsearch_keystore" + firstResName := resType + ".test" + secondResName := resType + ".gcs_creds" + randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + startCfg := "testdata/deployment_elasticsearch_keystore_1.tf" + + cfgF := func(cfg string) string { + return fixtureAccDeploymentResourceBasic( + t, cfg, randomName, getRegion(), defaultTemplate, + ) + } + + // Required because of a bug - see 
https://discuss.hashicorp.com/t/acceptance-testing-sdk-framework-upgrade-issue/44166/2 + externalProviderConfig := ` +terraform { + required_providers { + ec = { + source = "elastic/ec" + version = "0.4.1" + } + } +}` + + resource.ParallelTest(t, resource.TestCase{ + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "ec": { + VersionConstraint: "0.4.1", + Source: "elastic/ec", + }, + }, + Config: cfgF(startCfg) + externalProviderConfig, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(firstResName, "setting_name", "xpack.notification.slack.account.hello.secure_url"), + resource.TestCheckResourceAttr(firstResName, "value", "hella"), + resource.TestCheckResourceAttr(firstResName, "as_file", "false"), + resource.TestCheckResourceAttrSet(firstResName, "deployment_id"), + + resource.TestCheckResourceAttr(secondResName, "setting_name", "gcs.client.secondary.credentials_file"), + resource.TestCheckResourceAttr(secondResName, "value", "{\n \"type\": \"service_account\",\n \"project_id\": \"project-id\",\n \"private_key_id\": \"key-id\",\n \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nprivate-key\\n-----END PRIVATE KEY-----\\n\",\n \"client_email\": \"service-account-email\",\n \"client_id\": \"client-id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/service-account-email\"\n}"), + resource.TestCheckResourceAttr(secondResName, "as_file", "false"), + resource.TestCheckResourceAttrSet(secondResName, "deployment_id"), + ), + }, + { + PlanOnly: true, + ProtoV6ProviderFactories: testAccProviderFactory, + Config: cfgF(startCfg), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(firstResName, "setting_name", 
"xpack.notification.slack.account.hello.secure_url"), + resource.TestCheckResourceAttr(firstResName, "value", "hella"), + resource.TestCheckResourceAttr(firstResName, "as_file", "false"), + resource.TestCheckResourceAttrSet(firstResName, "deployment_id"), + + resource.TestCheckResourceAttr(secondResName, "setting_name", "gcs.client.secondary.credentials_file"), + resource.TestCheckResourceAttr(secondResName, "value", "{\n \"type\": \"service_account\",\n \"project_id\": \"project-id\",\n \"private_key_id\": \"key-id\",\n \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nprivate-key\\n-----END PRIVATE KEY-----\\n\",\n \"client_email\": \"service-account-email\",\n \"client_id\": \"client-id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/service-account-email\"\n}"), + resource.TestCheckResourceAttr(secondResName, "as_file", "false"), + resource.TestCheckResourceAttrSet(secondResName, "deployment_id"), + ), + }, + }, + }) +} + func checkESKeystoreResourceID(resourceName string, id *string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] diff --git a/ec/ecresource/elasticsearchkeystoreresource/create.go b/ec/ecresource/elasticsearchkeystoreresource/create.go index 0ac5dc644..e65e58356 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/create.go +++ b/ec/ecresource/elasticsearchkeystoreresource/create.go @@ -22,30 +22,54 @@ import ( "strconv" "strings" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/elastic/cloud-sdk-go/pkg/api" - 
"github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/eskeystoreapi" ) -// create will create an item in the Elasticsearch keystore -func create(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) - deploymentID := d.Get("deployment_id").(string) - settingName := d.Get("setting_name").(string) +// Create will create an item in the Elasticsearch keystore +func (r Resource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + if !resourceReady(r, &response.Diagnostics) { + return + } + + var newState modelV0 + + diags := request.Plan.Get(ctx, &newState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } if _, err := eskeystoreapi.Update(eskeystoreapi.UpdateParams{ - API: client, - DeploymentID: deploymentID, - Contents: expandModel(d), + API: r.client, + DeploymentID: newState.DeploymentID.Value, + Contents: expandModel(ctx, newState), }); err != nil { - return diag.FromErr(err) + response.Diagnostics.AddError(err.Error(), err.Error()) + return + } + + newState.ID = types.String{Value: hashID(newState.DeploymentID.Value, newState.SettingName.Value)} + + found, diags := r.read(ctx, newState.DeploymentID.Value, &newState) + response.Diagnostics.Append(diags...) + if !found { + // We can't unset the state here, and must make sure to set the state according to the plan below. + // So all we do is add a warning. + diags.AddWarning( + "Failed to read Elasticsearch keystore.", + "Please run terraform refresh to ensure a consistent state.", + ) + } + if response.Diagnostics.HasError() { + return } - d.SetId(hashID(deploymentID, settingName)) - return read(ctx, d, meta) + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) 
} func hashID(elem ...string) string { diff --git a/ec/ecresource/elasticsearchkeystoreresource/delete.go b/ec/ecresource/elasticsearchkeystoreresource/delete.go index ae951e4bc..2edd3c146 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/delete.go +++ b/ec/ecresource/elasticsearchkeystoreresource/delete.go @@ -20,33 +20,37 @@ package elasticsearchkeystoreresource import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/eskeystoreapi" ) -// delete will delete an existing element in the Elasticsearch keystore -func delete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) - contents := expandModel(d) +// Delete will delete an existing element in the Elasticsearch keystore +func (r Resource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + + if !resourceReady(r, &response.Diagnostics) { + return + } + + var state modelV0 + + diags := request.State.Get(ctx, &state) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } // Since we're using the Update API (PATCH method), we need to se the Value // field to nil for the keystore setting to be unset. 
- if secret, ok := contents.Secrets[d.Get("setting_name").(string)]; ok { - secret.Value = nil - } + state.Value = types.String{Null: true} + contents := expandModel(ctx, state) if _, err := eskeystoreapi.Update(eskeystoreapi.UpdateParams{ - API: client, - DeploymentID: d.Get("deployment_id").(string), + API: r.client, + DeploymentID: state.DeploymentID.Value, Contents: contents, }); err != nil { - return diag.FromErr(err) + response.Diagnostics.AddError(err.Error(), err.Error()) } - - d.SetId("") - return read(ctx, d, meta) } diff --git a/ec/ecresource/elasticsearchkeystoreresource/expanders.go b/ec/ecresource/elasticsearchkeystoreresource/expanders.go index 74dd423e2..e5a9a766b 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/expanders.go +++ b/ec/ecresource/elasticsearchkeystoreresource/expanders.go @@ -18,18 +18,17 @@ package elasticsearchkeystoreresource import ( + "context" "encoding/json" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) -func expandModel(d *schema.ResourceData) *models.KeystoreContents { +func expandModel(ctx context.Context, state modelV0) *models.KeystoreContents { var value interface{} - secretName := d.Get("setting_name").(string) - strVal := d.Get("value").(string) + secretName := state.SettingName.Value + strVal := state.Value.Value // Tries to unmarshal the contents of the value into an `interface{}`, // if it fails, then the contents aren't a JSON object. 
@@ -40,7 +39,7 @@ func expandModel(d *schema.ResourceData) *models.KeystoreContents { return &models.KeystoreContents{ Secrets: map[string]models.KeystoreSecret{ secretName: { - AsFile: ec.Bool(d.Get("as_file").(bool)), + AsFile: ec.Bool(state.AsFile.Value), Value: value, }, }, diff --git a/ec/ecresource/elasticsearchkeystoreresource/expanders_test.go b/ec/ecresource/elasticsearchkeystoreresource/expanders_test.go index 6cc4a12cb..6fd977465 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/expanders_test.go +++ b/ec/ecresource/elasticsearchkeystoreresource/expanders_test.go @@ -18,11 +18,13 @@ package elasticsearchkeystoreresource import ( + "context" "testing" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" @@ -30,7 +32,7 @@ import ( func Test_expandModel(t *testing.T) { type args struct { - d *schema.ResourceData + state modelV0 } tests := []struct { name string @@ -39,14 +41,14 @@ func Test_expandModel(t *testing.T) { }{ { name: "parses the resource with a string value", - args: args{d: newResourceData(t, resDataParams{ - ID: "some-random-id", - Resources: map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "setting_name": "my_secret", - "value": "supersecret", - }, - })}, + args: args{state: modelV0{ + + ID: types.String{Value: "some-random-id"}, + DeploymentID: types.String{Value: mock.ValidClusterID}, + SettingName: types.String{Value: "my_secret"}, + Value: types.String{Value: "supersecret"}, + AsFile: types.Bool{Value: false}, + }}, want: &models.KeystoreContents{ Secrets: map[string]models.KeystoreSecret{ "my_secret": { @@ -58,12 +60,12 @@ func Test_expandModel(t *testing.T) { }, { name: "parses the resource with a json formatted value", - args: args{d: newResourceData(t, resDataParams{ - 
ID: "some-random-id", - Resources: map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "setting_name": "my_secret", - "value": `{ + args: args{state: modelV0{ + + ID: types.String{Value: "some-random-id"}, + DeploymentID: types.String{Value: mock.ValidClusterID}, + SettingName: types.String{Value: "my_secret"}, + Value: types.String{Value: `{ "type": "service_account", "project_id": "project-id", "private_key_id": "key-id", @@ -74,10 +76,10 @@ func Test_expandModel(t *testing.T) { "token_uri": "https://accounts.google.com/o/oauth2/token", "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/service-account-email" -}`, - "as_file": true, - }, - })}, +}`}, + AsFile: types.Bool{Value: true}, + }, + }, want: &models.KeystoreContents{ Secrets: map[string]models.KeystoreSecret{ "my_secret": { @@ -101,7 +103,7 @@ func Test_expandModel(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := expandModel(tt.args.d) + got := expandModel(context.Background(), tt.args.state) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecresource/elasticsearchkeystoreresource/read.go b/ec/ecresource/elasticsearchkeystoreresource/read.go index a6710b384..9968c7fd1 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/read.go +++ b/ec/ecresource/elasticsearchkeystoreresource/read.go @@ -20,52 +20,68 @@ package elasticsearchkeystoreresource import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/eskeystoreapi" + 
"github.com/elastic/cloud-sdk-go/pkg/models" ) -// read queries the remote Elasticsearch keystore state and updates the local state. -func read(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var client = meta.(*api.API) - deploymentID := d.Get("deployment_id").(string) +// Read queries the remote Elasticsearch keystore state and updates the local state. +func (r Resource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + if !resourceReady(r, &response.Diagnostics) { + return + } + + var newState modelV0 + + diags := request.State.Get(ctx, &newState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + found, diags := r.read(ctx, newState.DeploymentID.Value, &newState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + if !found { + response.State.RemoveResource(ctx) + return + } + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) +} + +func (r Resource) read(ctx context.Context, deploymentID string, state *modelV0) (found bool, diags diag.Diagnostics) { res, err := eskeystoreapi.Get(eskeystoreapi.GetParams{ - API: client, + API: r.client, DeploymentID: deploymentID, }) if err != nil { - return diag.FromErr(err) - } - - if err := modelToState(d, res); err != nil { - return diag.FromErr(err) + diags.AddError(err.Error(), err.Error()) + return true, diags } - return nil + return modelToState(ctx, res, state) } -// This modelToState function is a little different than others in that it does +// This modelToState function is a little different from others in that it does // not set any other fields than "as_file". 
This is because the "value" is not -// returned by the API for obvious reasons and thus we cannot reconcile that the +// returned by the API for obvious reasons, and thus we cannot reconcile that the // value of the secret is the same in the remote as it is in the configuration. -func modelToState(d *schema.ResourceData, res *models.KeystoreContents) error { - if secret, ok := res.Secrets[d.Get("setting_name").(string)]; ok { +func modelToState(ctx context.Context, res *models.KeystoreContents, state *modelV0) (found bool, diags diag.Diagnostics) { + if secret, ok := res.Secrets[state.SettingName.Value]; ok { if secret.AsFile != nil { - if err := d.Set("as_file", *secret.AsFile); err != nil { - return err - } + state.AsFile = types.Bool{Value: *secret.AsFile} } - return nil + return true, nil } - // When the secret is not found in the returned map of secrets, set the id - // to an empty string so that the resource is marked as destroyed. Would - // only happen if secrets are removed from the underlying Deployment. - d.SetId("") - return nil + // When the secret is not found in the returned map of secrets, the resource should be removed from state. + // Would only happen if secrets are removed from the underlying Deployment. + return false, nil } diff --git a/ec/ecresource/elasticsearchkeystoreresource/read_test.go b/ec/ecresource/elasticsearchkeystoreresource/read_test.go deleted file mode 100644 index 2791e43ca..000000000 --- a/ec/ecresource/elasticsearchkeystoreresource/read_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package elasticsearchkeystoreresource - -import ( - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" -) - -func Test_modelToState(t *testing.T) { - esKeystoreSchemaArg := schema.TestResourceDataRaw(t, newSchema(), map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "setting_name": "my_secret", - "value": "supersecret", - "as_file": false, // This field is overridden. 
- }) - esKeystoreSchemaArg.SetId(mock.ValidClusterID) - - esKeystoreSchemaArgMissing := schema.TestResourceDataRaw(t, newSchema(), map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "setting_name": "my_secret", - "value": "supersecret", - "as_file": false, - }) - esKeystoreSchemaArgMissing.SetId(mock.ValidClusterID) - - type args struct { - d *schema.ResourceData - res *models.KeystoreContents - } - tests := []struct { - name string - args args - want *schema.ResourceData - err error - }{ - { - name: "flattens the keystore secret (not really since the value is not returned)", - args: args{ - d: esKeystoreSchemaArg, - res: &models.KeystoreContents{ - Secrets: map[string]models.KeystoreSecret{ - "my_secret": { - AsFile: ec.Bool(true), - }, - "some_other_secret": { - AsFile: ec.Bool(false), - }, - }, - }, - }, - want: newResourceData(t, resDataParams{ - ID: mock.ValidClusterID, - Resources: map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "setting_name": "my_secret", - "value": "supersecret", - "as_file": true, - }, - }), - }, - { - name: "unsets the ID when our secret is not in the returned list of secrets", - args: args{ - d: esKeystoreSchemaArgMissing, - res: &models.KeystoreContents{ - Secrets: map[string]models.KeystoreSecret{ - "my_other_secret": { - AsFile: ec.Bool(true), - }, - "some_other_secret": { - AsFile: ec.Bool(false), - }, - }, - }, - }, - want: newResourceData(t, resDataParams{ - Resources: map[string]interface{}{}, - }), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := modelToState(tt.args.d, tt.args.res) - if tt.err != nil || err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - - wantState := tt.want.State() - gotState := tt.args.d.State() - - if wantState != nil && gotState != nil { - assert.Equal(t, wantState.Attributes, gotState.Attributes) - return - } - }) - } -} diff --git a/ec/ecresource/elasticsearchkeystoreresource/resource.go 
b/ec/ecresource/elasticsearchkeystoreresource/resource.go deleted file mode 100644 index 70fefacb0..000000000 --- a/ec/ecresource/elasticsearchkeystoreresource/resource.go +++ /dev/null @@ -1,41 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package elasticsearchkeystoreresource - -import ( - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// Resource returns the ec_deployment_elasticsearch_keystore resource schema. -func Resource() *schema.Resource { - return &schema.Resource{ - Description: "Elastic Cloud deployment Elasticsearch keystore", - Schema: newSchema(), - - CreateContext: create, - ReadContext: read, - UpdateContext: update, - DeleteContext: delete, - - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(5 * time.Minute), - }, - } -} diff --git a/ec/ecresource/elasticsearchkeystoreresource/resource_test.go b/ec/ecresource/elasticsearchkeystoreresource/resource_test.go new file mode 100644 index 000000000..0139913d2 --- /dev/null +++ b/ec/ecresource/elasticsearchkeystoreresource/resource_test.go @@ -0,0 +1,302 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package elasticsearchkeystoreresource_test + +import ( + "net/url" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + r "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + + "github.com/elastic/terraform-provider-ec/ec" +) + +func TestResourceElasticsearchKeyStore(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + readDeployment(), + createResponse(), + readDeployment(), + readResponse(), + readDeployment(), + readResponse(), + readDeployment(), + readResponse(), + readDeployment(), + updateResponse(), + readDeployment(), + readResponse(), + readDeployment(), + readResponse(), + readDeployment(), + readResponse(), + readDeployment(), + deleteResponse(), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: externalKeystore1, + Check: checkResource1(), + }, + { // Update resource + Config: externalKeystore2, + Check: checkResource2(), + }, + { // Delete resource + Destroy: true, + Config: externalKeystore1, + }, + }, + }) +} + +func 
TestResourceElasticsearchKeyStore_failedCreate(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + mock.New500Response(mock.SampleInternalError().Response.Body), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: externalKeystore1, + ExpectError: regexp.MustCompile(`internal.server.error: There was an internal server error`), + }, + }, + }) +} + +func TestResourceElasticsearchKeyStore_failedReadAfterCreate(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + readDeployment(), + createResponse(), + mock.New500Response(mock.SampleInternalError().Response.Body), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: externalKeystore1, + ExpectError: regexp.MustCompile(`internal.server.error: There was an internal server error`), + }, + }, + }) +} + +func TestResourceElasticsearchKeyStore_notFoundAfterCreate_and_gracefulDeletion(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + readDeployment(), + createResponse(), + readDeployment(), + emptyReadResponse(), + readDeployment(), + emptyReadResponse(), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: externalKeystore1, + Check: checkResource1(), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +const externalKeystore1 = ` +resource "ec_deployment_elasticsearch_keystore" "test" { + deployment_id = "0a592ab2c5baf0fa95c77ac62135782e" + setting_name = "xpack.notification.slack.account.hello.secure_url" + value = "hella" +} +` + +const externalKeystore2 = ` +resource "ec_deployment_elasticsearch_keystore" "test" { + deployment_id = "0a592ab2c5baf0fa95c77ac62135782e" + setting_name = "xpack.notification.slack.account.hello.secure_url" + value = < Date: Fri, 23 Sep 2022 06:24:12 +0200 Subject: [PATCH 010/104] Update dependencies and remove external 
provider workaround from migration tests --- .../deployment_elasticsearch_kesytore_test.go | 13 +------- ...loyment_traffic_filter_association_test.go | 12 +------ ec/acc/deployment_traffic_filter_test.go | 12 +------ .../deploymentdatasource/datasource.go | 2 -- .../deploymentsdatasource/datasource.go | 2 -- ec/ecdatasource/stackdatasource/datasource.go | 2 -- .../elasticsearchkeystoreresource/schema.go | 3 -- .../trafficfilterassocresource/schema.go | 2 -- ec/ecresource/trafficfilterresource/schema.go | 2 -- ec/provider.go | 2 -- go.mod | 16 +++++----- go.sum | 32 +++++++++---------- 12 files changed, 27 insertions(+), 73 deletions(-) diff --git a/ec/acc/deployment_elasticsearch_kesytore_test.go b/ec/acc/deployment_elasticsearch_kesytore_test.go index d888f7a5e..9b15d7524 100644 --- a/ec/acc/deployment_elasticsearch_kesytore_test.go +++ b/ec/acc/deployment_elasticsearch_kesytore_test.go @@ -131,17 +131,6 @@ func TestAccDeploymentElasticsearchKeystore_UpgradeFrom0_4_1(t *testing.T) { ) } - // Required because of a bug - see https://discuss.hashicorp.com/t/acceptance-testing-sdk-framework-upgrade-issue/44166/2 - externalProviderConfig := ` -terraform { - required_providers { - ec = { - source = "elastic/ec" - version = "0.4.1" - } - } -}` - resource.ParallelTest(t, resource.TestCase{ Steps: []resource.TestStep{ { @@ -151,7 +140,7 @@ terraform { Source: "elastic/ec", }, }, - Config: cfgF(startCfg) + externalProviderConfig, + Config: cfgF(startCfg), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(firstResName, "setting_name", "xpack.notification.slack.account.hello.secure_url"), resource.TestCheckResourceAttr(firstResName, "value", "hella"), diff --git a/ec/acc/deployment_traffic_filter_association_test.go b/ec/acc/deployment_traffic_filter_association_test.go index 161d8189e..05e0b0010 100644 --- a/ec/acc/deployment_traffic_filter_association_test.go +++ b/ec/acc/deployment_traffic_filter_association_test.go @@ -81,16 +81,6 @@ func 
TestAccDeploymentTrafficFilterAssociation_UpgradeFrom0_4_1(t *testing.T) { cfg := fixtureAccDeploymentTrafficFilterResourceAssociationBasic(t, startCfg, randomName, getRegion(), defaultTemplate) ignoreChangesCfg := fixtureAccDeploymentTrafficFilterResourceAssociationBasic(t, ignoreChangesCfgFile, randomName, getRegion(), defaultTemplate) - // Required because of a bug - see https://discuss.hashicorp.com/t/acceptance-testing-sdk-framework-upgrade-issue/44166/2 - externalProviderConfig := ` -terraform { - required_providers { - ec = { - source = "elastic/ec" - version = "0.4.1" - } - } -}` resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, CheckDestroy: testAccDeploymentTrafficFilterDestroy, @@ -105,7 +95,7 @@ terraform { // Expects a non-empty plan since "ec_deployment.traffic_filter" // will have changes due to the traffic filter association. ExpectNonEmptyPlan: true, - Config: cfg + externalProviderConfig, + Config: cfg, Check: checkBasicDeploymentTrafficFilterAssociationResource( resName, resAssocName, randomName, resource.TestCheckResourceAttr(resName, "include_by_default", "false"), diff --git a/ec/acc/deployment_traffic_filter_test.go b/ec/acc/deployment_traffic_filter_test.go index c2fd78adf..46ca08f84 100644 --- a/ec/acc/deployment_traffic_filter_test.go +++ b/ec/acc/deployment_traffic_filter_test.go @@ -119,16 +119,6 @@ func TestAccDeploymentTrafficFilter_UpgradeFrom0_4_1(t *testing.T) { startCfg := "testdata/deployment_traffic_filter_basic.tf" cfg := fixtureAccDeploymentTrafficFilterResourceBasic(t, startCfg, randomName, getRegion()) - // Required because of a bug - see https://discuss.hashicorp.com/t/acceptance-testing-sdk-framework-upgrade-issue/44166/2 - externalProviderConfig := ` -terraform { - required_providers { - ec = { - source = "elastic/ec" - version = "0.4.1" - } - } -}` resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, CheckDestroy: testAccDeploymentTrafficFilterDestroy, @@ 
-140,7 +130,7 @@ terraform { Source: "elastic/ec", }, }, - Config: cfg + externalProviderConfig, + Config: cfg, Check: checkBasicDeploymentTrafficFilterResource(resName, randomName, resource.TestCheckResourceAttr(resName, "include_by_default", "false"), resource.TestCheckResourceAttr(resName, "type", "ip"), diff --git a/ec/ecdatasource/deploymentdatasource/datasource.go b/ec/ecdatasource/deploymentdatasource/datasource.go index ec5e5b396..857d6cff7 100644 --- a/ec/ecdatasource/deploymentdatasource/datasource.go +++ b/ec/ecdatasource/deploymentdatasource/datasource.go @@ -37,8 +37,6 @@ import ( var _ datasource.DataSource = &DataSource{} var _ datasource.DataSourceWithConfigure = &DataSource{} -var _ datasource.DataSourceWithGetSchema = &DataSource{} -var _ datasource.DataSourceWithMetadata = &DataSource{} type DataSource struct { client *api.API diff --git a/ec/ecdatasource/deploymentsdatasource/datasource.go b/ec/ecdatasource/deploymentsdatasource/datasource.go index 08139b485..f0252df40 100644 --- a/ec/ecdatasource/deploymentsdatasource/datasource.go +++ b/ec/ecdatasource/deploymentsdatasource/datasource.go @@ -37,8 +37,6 @@ import ( var _ datasource.DataSource = &DataSource{} var _ datasource.DataSourceWithConfigure = &DataSource{} -var _ datasource.DataSourceWithGetSchema = &DataSource{} -var _ datasource.DataSourceWithMetadata = &DataSource{} type DataSource struct { client *api.API diff --git a/ec/ecdatasource/stackdatasource/datasource.go b/ec/ecdatasource/stackdatasource/datasource.go index aa693ff84..6acf139a1 100644 --- a/ec/ecdatasource/stackdatasource/datasource.go +++ b/ec/ecdatasource/stackdatasource/datasource.go @@ -36,8 +36,6 @@ import ( var _ datasource.DataSource = &DataSource{} var _ datasource.DataSourceWithConfigure = &DataSource{} -var _ datasource.DataSourceWithGetSchema = &DataSource{} -var _ datasource.DataSourceWithMetadata = &DataSource{} type DataSource struct { client *api.API diff --git 
a/ec/ecresource/elasticsearchkeystoreresource/schema.go b/ec/ecresource/elasticsearchkeystoreresource/schema.go index 679059e99..ed4746566 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/schema.go +++ b/ec/ecresource/elasticsearchkeystoreresource/schema.go @@ -33,9 +33,6 @@ import ( // Ensure provider defined types fully satisfy framework interfaces var _ resource.Resource = &Resource{} var _ resource.ResourceWithConfigure = &Resource{} -var _ resource.ResourceWithGetSchema = &Resource{} - -var _ resource.ResourceWithMetadata = &Resource{} func (r *Resource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { return tfsdk.Schema{ diff --git a/ec/ecresource/trafficfilterassocresource/schema.go b/ec/ecresource/trafficfilterassocresource/schema.go index 5ced1a99c..be24bbb14 100644 --- a/ec/ecresource/trafficfilterassocresource/schema.go +++ b/ec/ecresource/trafficfilterassocresource/schema.go @@ -33,9 +33,7 @@ import ( // Ensure provider defined types fully satisfy framework interfaces var _ resource.Resource = &Resource{} var _ resource.ResourceWithConfigure = &Resource{} -var _ resource.ResourceWithGetSchema = &Resource{} var _ resource.ResourceWithImportState = &Resource{} -var _ resource.ResourceWithMetadata = &Resource{} const entityTypeDeployment = "deployment" diff --git a/ec/ecresource/trafficfilterresource/schema.go b/ec/ecresource/trafficfilterresource/schema.go index 5d5cb23d3..b5eaca07c 100644 --- a/ec/ecresource/trafficfilterresource/schema.go +++ b/ec/ecresource/trafficfilterresource/schema.go @@ -36,9 +36,7 @@ import ( // Ensure provider defined types fully satisfy framework interfaces var _ resource.Resource = &Resource{} var _ resource.ResourceWithConfigure = &Resource{} -var _ resource.ResourceWithGetSchema = &Resource{} var _ resource.ResourceWithImportState = &Resource{} -var _ resource.ResourceWithMetadata = &Resource{} func (r *Resource) GetSchema(_ context.Context) (tfsdk.Schema, diag.Diagnostics) { return tfsdk.Schema{ 
diff --git a/ec/provider.go b/ec/provider.go index 9f1dd2631..d076cc8af 100644 --- a/ec/provider.go +++ b/ec/provider.go @@ -149,8 +149,6 @@ func ProviderWithClient(client *api.API, version string) provider.Provider { var _ provider.Provider = (*Provider)(nil) var _ provider.ProviderWithMetadata = (*Provider)(nil) -var _ provider.ProviderWithDataSources = (*Provider)(nil) -var _ provider.ProviderWithResources = (*Provider)(nil) type Provider struct { version string diff --git a/go.mod b/go.mod index 7f8123e5b..5ea92a68b 100644 --- a/go.mod +++ b/go.mod @@ -7,13 +7,13 @@ require ( github.com/elastic/cloud-sdk-go v1.10.0 github.com/go-openapi/runtime v0.24.1 github.com/go-openapi/strfmt v0.21.3 - github.com/hashicorp/terraform-plugin-framework v0.12.0 + github.com/hashicorp/terraform-plugin-framework v0.13.0 github.com/hashicorp/terraform-plugin-framework-validators v0.5.0 github.com/hashicorp/terraform-plugin-go v0.14.0 github.com/hashicorp/terraform-plugin-mux v0.7.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.22.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.23.0 github.com/stretchr/testify v1.8.0 - golang.org/x/exp v0.0.0-20220914170420-dc92f8653013 + golang.org/x/exp v0.0.0-20220921164117-439092de6870 ) require ( @@ -37,7 +37,7 @@ require ( github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect - github.com/hashicorp/go-hclog v1.3.0 // indirect + github.com/hashicorp/go-hclog v1.3.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.4.5 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect @@ -69,12 +69,12 @@ require ( github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/zclconf/go-cty v1.11.0 // indirect go.mongodb.org/mongo-driver v1.10.2 // indirect - golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect - golang.org/x/net 
v0.0.0-20220909164309-bea034e7d591 // indirect - golang.org/x/sys v0.0.0-20220913175220-63ea55921009 // indirect + golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 // indirect + golang.org/x/net v0.0.0-20220921203646-d300de134e69 // indirect + golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220914210030-581e60b4ef85 // indirect + google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737 // indirect google.golang.org/grpc v1.49.0 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 670974d02..6946a79c1 100644 --- a/go.sum +++ b/go.sum @@ -233,8 +233,8 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= -github.com/hashicorp/go-hclog v1.3.0 h1:G0ACM8Z2WilWgPv3Vdzwm3V0BQu/kSmrkVtpe1fy9do= -github.com/hashicorp/go-hclog v1.3.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo= +github.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.4.5 h1:oTE/oQR4eghggRg8VY7PAz3dr++VwDNBGCcOfIvHpBo= @@ -256,8 +256,8 @@ github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjl github.com/hashicorp/terraform-exec v0.17.3/go.mod 
h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI= github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= -github.com/hashicorp/terraform-plugin-framework v0.12.0 h1:Bk3l5MQUaZoo5eplr+u1FomYqGS564e8Tp3rutnCfYg= -github.com/hashicorp/terraform-plugin-framework v0.12.0/go.mod h1:wcZdk4+Uef6Ng+BiBJjGAcIPlIs5bhlEV/TA1k6Xkq8= +github.com/hashicorp/terraform-plugin-framework v0.13.0 h1:tGnqttzZwU3FKc+HasHr2Yi5L81FcQbdc8zQhbBD9jQ= +github.com/hashicorp/terraform-plugin-framework v0.13.0/go.mod h1:wcZdk4+Uef6Ng+BiBJjGAcIPlIs5bhlEV/TA1k6Xkq8= github.com/hashicorp/terraform-plugin-framework-validators v0.5.0 h1:eD79idhnJOBajkUMEbm0c8dOyOb/F49STbUEVojT6F4= github.com/hashicorp/terraform-plugin-framework-validators v0.5.0/go.mod h1:NfGgclDM3FZqvNVppPKE2aHI1JAyT002ypPRya7ch3I= github.com/hashicorp/terraform-plugin-go v0.14.0 h1:ttnSlS8bz3ZPYbMb84DpcPhY4F5DsQtcAS7cHo8uvP4= @@ -266,8 +266,8 @@ github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R github.com/hashicorp/terraform-plugin-log v0.7.0/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= github.com/hashicorp/terraform-plugin-mux v0.7.0 h1:wRbSYzg+v2sn5Mdee0UKm4YTt4wJG0LfSwtgNuBkglY= github.com/hashicorp/terraform-plugin-mux v0.7.0/go.mod h1:Ae30Mc5lz4d1awtiCbHP0YyvgBeiQ00Q1nAq0U3lb+I= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.22.0 h1:MzfNfrheTt24xbEbA4npUSbX3GYu4xjXS7czcpJFyQY= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.22.0/go.mod h1:q1XKSxXg9nDmhV0IvNZNZxe3gcTAHzMqrjs8wX1acng= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.23.0 h1:D4EeQm0piYXIHp6ZH3zjyP2Elq6voC64x3GZptaiefA= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.23.0/go.mod h1:xkJGavPvP9kYS/VbiW8o7JuTNgPwm7Tiw/Ie/b46r4c= github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c h1:D8aRO6+mTqHfLsK/BC3j5OAoogv1WLRWzY1AaTo3rBg= 
github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c/go.mod h1:Wn3Na71knbXc1G8Lh+yu/dQWWJeFQEpDeJMtWMtlmNI= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= @@ -444,10 +444,10 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5g+dZMEWqcz5Czj/GWYbkM= -golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20220914170420-dc92f8653013 h1:ZjglnWxEUdPyXl4o/j4T89SRCI+4X6NW6185PNLEOF4= -golang.org/x/exp v0.0.0-20220914170420-dc92f8653013/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 h1:a5Yg6ylndHHYJqIPrdq0AhvR6KTvDTAvgBtaidhEevY= +golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20220921164117-439092de6870 h1:j8b6j9gzSigH28O5SjSpQSSh9lFd6f5D/q0aHjNTulc= +golang.org/x/exp v0.0.0-20220921164117-439092de6870/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -473,8 +473,8 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1 golang.org/x/net 
v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220921203646-d300de134e69 h1:hUJpGDpnfwdJW8iNypFjmSY0sCBEL+spFTZ2eO+Sfps= +golang.org/x/net v0.0.0-20220921203646-d300de134e69/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -512,8 +512,8 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220913175220-63ea55921009 h1:PuvuRMeLWqsf/ZdT1UUZz0syhioyv1mzuFZsXs4fvhw= -golang.org/x/sys v0.0.0-20220913175220-63ea55921009/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 h1:h+EGohizhe9XlX18rfpa8k8RAc5XyaeamM+0VHRd4lc= +golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= 
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= @@ -541,8 +541,8 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20220914210030-581e60b4ef85 h1:lkYqfLZL9+9C+SltHOTeOHL6uueWYYkGp5NoeOZQsis= -google.golang.org/genproto v0.0.0-20220914210030-581e60b4ef85/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737 h1:K1zaaMdYBXRyX+cwFnxj7M6zwDyumLQMZ5xqwGvjreQ= +google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737/go.mod h1:2r/26NEF3bFmT3eC3aZreahSal0C3Shl8Gi6vyDYqOQ= google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= From f310127f9eece6b1490fb0b3a7ca8abe53f0c882 Mon Sep 17 00:00:00 2001 From: Pascal Hofmann Date: Tue, 18 Oct 2022 17:45:56 +0200 Subject: [PATCH 011/104] Migrate resource ec_deployment_extension to terraform-plugin-framework (#12) * Migrate resource ec_deployment_extension to terraform-plugin-framework * Remove useless use of ec.Bool() and ec.String() Co-authored-by: Pascal Hofmann --- ec/acc/deployment_extension_basic_test.go | 40 ++ .../deploymentsdatasource/expanders.go | 6 +- .../elasticsearchkeystoreresource/create.go | 10 +- .../expanders.go | 3 +- .../resource_test.go | 6 +- .../elasticsearchkeystoreresource/update.go | 10 +- ec/ecresource/extensionresource/create.go | 96 ++-- 
.../extensionresource/create_test.go | 94 ---- ec/ecresource/extensionresource/delete.go | 49 +-- .../extensionresource/delete_test.go | 138 ------ ec/ecresource/extensionresource/read.go | 121 ++--- ec/ecresource/extensionresource/read_test.go | 152 ------- ec/ecresource/extensionresource/resource.go | 45 -- .../extensionresource/resource_test.go | 416 ++++++++++++++++++ ec/ecresource/extensionresource/schema.go | 195 +++++--- .../extensionresource/testutil_datastruct.go | 47 -- ec/ecresource/extensionresource/update.go | 104 ++--- .../extensionresource/update_test.go | 194 -------- ec/ecresource/extensionresource/upload.go | 23 +- ec/ecresource/trafficfilterresource/create.go | 5 + .../trafficfilterresource/expanders.go | 9 +- .../trafficfilterresource/resource_test.go | 9 +- ec/ecresource/trafficfilterresource/update.go | 11 +- ec/internal/util/helpers.go | 1 + ec/provider.go | 4 +- 25 files changed, 824 insertions(+), 964 deletions(-) delete mode 100644 ec/ecresource/extensionresource/create_test.go delete mode 100644 ec/ecresource/extensionresource/delete_test.go delete mode 100644 ec/ecresource/extensionresource/read_test.go delete mode 100644 ec/ecresource/extensionresource/resource.go create mode 100644 ec/ecresource/extensionresource/resource_test.go delete mode 100644 ec/ecresource/extensionresource/testutil_datastruct.go delete mode 100644 ec/ecresource/extensionresource/update_test.go diff --git a/ec/acc/deployment_extension_basic_test.go b/ec/acc/deployment_extension_basic_test.go index 7696d6a97..c0fdfc5a0 100644 --- a/ec/acc/deployment_extension_basic_test.go +++ b/ec/acc/deployment_extension_basic_test.go @@ -65,6 +65,46 @@ func TestAccDeploymentExtension_basic(t *testing.T) { }) } +func TestAccDeploymentExtension_UpgradeFrom0_4_1(t *testing.T) { + resName := "ec_deployment_extension.my_extension" + randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + + cfg := fixtureAccExtensionBasicWithTF(t, 
"testdata/extension_basic.tf", randomName, "desc") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + CheckDestroy: testAccDeploymentTrafficFilterDestroy, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "ec": { + VersionConstraint: "0.4.1", + Source: "elastic/ec", + }, + }, + Config: cfg, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resName, "name", randomName), + resource.TestCheckResourceAttr(resName, "version", "*"), + resource.TestCheckResourceAttr(resName, "description", "desc"), + resource.TestCheckResourceAttr(resName, "extension_type", "bundle"), + ), + }, + { + PlanOnly: true, + ProtoV6ProviderFactories: testAccProviderFactory, + Config: cfg, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resName, "name", randomName), + resource.TestCheckResourceAttr(resName, "version", "*"), + resource.TestCheckResourceAttr(resName, "description", "desc"), + resource.TestCheckResourceAttr(resName, "extension_type", "bundle"), + ), + }, + }, + }) +} + func fixtureAccExtensionBasicWithTF(t *testing.T, tfFileName, extensionName, description string) string { t.Helper() diff --git a/ec/ecdatasource/deploymentsdatasource/expanders.go b/ec/ecdatasource/deploymentsdatasource/expanders.go index 635883ab8..7f21c46e7 100644 --- a/ec/ecdatasource/deploymentsdatasource/expanders.go +++ b/ec/ecdatasource/deploymentsdatasource/expanders.go @@ -40,7 +40,7 @@ func expandFilters(ctx context.Context, state modelV0) (*models.SearchRequest, d Prefix: map[string]models.PrefixQuery{ // The "keyword" addition denotes that the query will be using a keyword // field rather than a text field in order to ensure the query is not analyzed - "name.keyword": {Value: ec.String(namePrefix)}, + "name.keyword": {Value: &namePrefix}, }, }) } @@ -180,11 +180,11 @@ func expandResourceFilters(ctx context.Context, resources *types.List, resourceK func 
newNestedTermQuery(path, term string, value string) *models.QueryContainer { return &models.QueryContainer{ Nested: &models.NestedQuery{ - Path: ec.String(path), + Path: &path, Query: &models.QueryContainer{ Term: map[string]models.TermQuery{ term: { - Value: ec.String(value), + Value: &value, }, }, }, diff --git a/ec/ecresource/elasticsearchkeystoreresource/create.go b/ec/ecresource/elasticsearchkeystoreresource/create.go index e65e58356..dcca8d4ce 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/create.go +++ b/ec/ecresource/elasticsearchkeystoreresource/create.go @@ -57,12 +57,12 @@ func (r Resource) Create(ctx context.Context, request resource.CreateRequest, re found, diags := r.read(ctx, newState.DeploymentID.Value, &newState) response.Diagnostics.Append(diags...) if !found { - // We can't unset the state here, and must make sure to set the state according to the plan below. - // So all we do is add a warning. - diags.AddWarning( - "Failed to read Elasticsearch keystore.", - "Please run terraform refresh to ensure a consistent state.", + response.Diagnostics.AddError( + "Failed to read Elasticsearch keystore after create.", + "Failed to read Elasticsearch keystore after create.", ) + response.State.RemoveResource(ctx) + return } if response.Diagnostics.HasError() { return diff --git a/ec/ecresource/elasticsearchkeystoreresource/expanders.go b/ec/ecresource/elasticsearchkeystoreresource/expanders.go index e5a9a766b..009924796 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/expanders.go +++ b/ec/ecresource/elasticsearchkeystoreresource/expanders.go @@ -22,7 +22,6 @@ import ( "encoding/json" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) func expandModel(ctx context.Context, state modelV0) *models.KeystoreContents { @@ -39,7 +38,7 @@ func expandModel(ctx context.Context, state modelV0) *models.KeystoreContents { return &models.KeystoreContents{ Secrets: map[string]models.KeystoreSecret{ secretName: 
{ - AsFile: ec.Bool(state.AsFile.Value), + AsFile: &state.AsFile.Value, Value: value, }, }, diff --git a/ec/ecresource/elasticsearchkeystoreresource/resource_test.go b/ec/ecresource/elasticsearchkeystoreresource/resource_test.go index 0139913d2..4100b64b4 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/resource_test.go +++ b/ec/ecresource/elasticsearchkeystoreresource/resource_test.go @@ -125,9 +125,9 @@ func TestResourceElasticsearchKeyStore_notFoundAfterCreate_and_gracefulDeletion( ), Steps: []r.TestStep{ { // Create resource - Config: externalKeystore1, - Check: checkResource1(), - ExpectNonEmptyPlan: true, + Config: externalKeystore1, + Check: checkResource1(), + ExpectError: regexp.MustCompile(`Failed to read Elasticsearch keystore after create.`), }, }, }) diff --git a/ec/ecresource/elasticsearchkeystoreresource/update.go b/ec/ecresource/elasticsearchkeystoreresource/update.go index 153ab92a3..303d075d3 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/update.go +++ b/ec/ecresource/elasticsearchkeystoreresource/update.go @@ -54,12 +54,12 @@ func (r Resource) Update(ctx context.Context, request resource.UpdateRequest, re return } if !found { - // We can't unset the state here, and must make sure to set the state according to the plan below. - // So all we do is add a warning. - diags.AddWarning( - "Failed to read Elasticsearch keystore.", - "Please run terraform refresh to ensure a consistent state.", + response.Diagnostics.AddError( + "Failed to read Elasticsearch keystore after update.", + "Failed to read Elasticsearch keystore after update.", ) + response.State.RemoveResource(ctx) + return } // Finally, set the state diff --git a/ec/ecresource/extensionresource/create.go b/ec/ecresource/extensionresource/create.go index 159a47062..980d16132 100644 --- a/ec/ecresource/extensionresource/create.go +++ b/ec/ecresource/extensionresource/create.go @@ -1,73 +1,65 @@ -// Licensed to Elasticsearch B.V. 
under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - package extensionresource import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/extensionapi" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" ) -// createResource will create a new deployment extension -func createResource(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) +func (r *Resource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + if !resourceReady(r, &response.Diagnostics) { + return + } + + var newState modelV0 + + diags := request.Plan.Get(ctx, &newState) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { + return + } - model, err := createRequest(client, d) + model, err := extensionapi.Create( + extensionapi.CreateParams{ + API: r.client, + Name: newState.Name.Value, + Version: newState.Version.Value, + Type: newState.ExtensionType.Value, + Description: newState.Description.Value, + DownloadURL: newState.DownloadURL.Value, + }, + ) if err != nil { - return diag.FromErr(err) + response.Diagnostics.AddError(err.Error(), err.Error()) + return } - d.SetId(*model.ID) + newState.ID = types.String{Value: *model.ID} - if _, ok := d.GetOk("file_path"); ok { - if err := uploadExtension(client, d); err != nil { - return diag.FromErr(multierror.NewPrefixed("failed to upload file", err)) + if !newState.FilePath.IsNull() && newState.FilePath.Value != "" { + response.Diagnostics.Append(r.uploadExtension(newState)...) + if response.Diagnostics.HasError() { + return } } - return readResource(ctx, d, meta) -} - -func createRequest(client *api.API, d *schema.ResourceData) (*models.Extension, error) { - name := d.Get("name").(string) - version := d.Get("version").(string) - extensionType := d.Get("extension_type").(string) - description := d.Get("description").(string) - downloadURL := d.Get("download_url").(string) - body := extensionapi.CreateParams{ - API: client, - Name: name, - Version: version, - Type: extensionType, - Description: description, - DownloadURL: downloadURL, + found, diags := r.read(newState.ID.Value, &newState) + response.Diagnostics.Append(diags...) + if !found { + response.Diagnostics.AddError( + "Failed to read deployment extension after create.", + "Failed to read deployment extension after create.", + ) + response.State.RemoveResource(ctx) + return } - - res, err := extensionapi.Create(body) - if err != nil { - return nil, err + if response.Diagnostics.HasError() { + return } - return res, nil + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) 
} diff --git a/ec/ecresource/extensionresource/create_test.go b/ec/ecresource/extensionresource/create_test.go deleted file mode 100644 index 78e527871..000000000 --- a/ec/ecresource/extensionresource/create_test.go +++ /dev/null @@ -1,94 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package extensionresource - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_createResource(t *testing.T) { - tc500Err := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - wantTC500 := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - - type args struct { - ctx context.Context - d *schema.ResourceData - meta interface{} - } - tests := []struct { - name string - args args - want diag.Diagnostics - wantRD *schema.ResourceData - }{ - { - name: "returns an error when it receives a 500", - args: args{ - d: tc500Err, - meta: api.NewMock(mock.NewErrorResponse(500, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: diag.Diagnostics{ - { - Severity: diag.Error, - Summary: "api error: 1 error occurred:\n\t* some: message\n\n", - }, - }, - wantRD: wantTC500, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := createResource(tt.args.ctx, tt.args.d, tt.args.meta) - assert.Equal(t, tt.want, got) - var want interface{} - if tt.wantRD != nil { - if s := tt.wantRD.State(); s != nil { - want = s.Attributes - } - } - - var gotState interface{} - if s := tt.args.d.State(); s != nil { - gotState = s.Attributes - } - - assert.Equal(t, want, gotState) - }) - } -} diff --git a/ec/ecresource/extensionresource/delete.go b/ec/ecresource/extensionresource/delete.go index 07152fa9c..a34620769 100644 --- a/ec/ecresource/extensionresource/delete.go +++ b/ec/ecresource/extensionresource/delete.go @@ -1,51 +1,36 @@ -// Licensed to Elasticsearch B.V. 
under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - package extensionresource import ( "context" "errors" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/extensionapi" "github.com/elastic/cloud-sdk-go/pkg/client/extensions" ) -func deleteResource(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) +func (r *Resource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + if !resourceReady(r, &response.Diagnostics) { + return + } + + var state modelV0 + + diags := request.State.Get(ctx, &state) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { + return + } if err := extensionapi.Delete(extensionapi.DeleteParams{ - API: client, - ExtensionID: d.Id(), + API: r.client, + ExtensionID: state.ID.Value, }); err != nil { - if alreadyDestroyed(err) { - d.SetId("") - return nil + if !alreadyDestroyed(err) { + response.Diagnostics.AddError(err.Error(), err.Error()) } - - return diag.FromErr(err) } - - d.SetId("") - return nil } func alreadyDestroyed(err error) bool { diff --git a/ec/ecresource/extensionresource/delete_test.go b/ec/ecresource/extensionresource/delete_test.go deleted file mode 100644 index 3e5c615b4..000000000 --- a/ec/ecresource/extensionresource/delete_test.go +++ /dev/null @@ -1,138 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package extensionresource - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_deleteResource(t *testing.T) { - tc200 := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - wantTC200 := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - wantTC200.SetId("") - - tc500Err := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - wantTC500 := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - - tc404Err := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - wantTC404 := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - wantTC404.SetId("") - - type args struct { - ctx context.Context - d *schema.ResourceData - meta interface{} - } - tests := []struct { - name string - args args - want diag.Diagnostics - wantRD *schema.ResourceData - }{ - { - name: "returns nil when it receives a 200", - args: args{ - d: tc200, - meta: api.NewMock(mock.New200Response(nil)), - }, - want: nil, - wantRD: wantTC200, - }, - { - name: "returns an error when it receives a 500", - args: args{ - d: tc500Err, - meta: api.NewMock(mock.NewErrorResponse(500, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: diag.Diagnostics{ - { - Severity: diag.Error, - Summary: "api error: 1 error occurred:\n\t* some: message\n\n", - }, - }, - wantRD: wantTC500, - }, - { - name: "returns nil and 
unsets the state when the error is known", - args: args{ - d: tc404Err, - meta: api.NewMock(mock.NewErrorResponse(404, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: nil, - wantRD: wantTC404, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := deleteResource(tt.args.ctx, tt.args.d, tt.args.meta) - assert.Equal(t, tt.want, got) - var want interface{} - if tt.wantRD != nil { - if s := tt.wantRD.State(); s != nil { - want = s.Attributes - } - } - - var gotState interface{} - if s := tt.args.d.State(); s != nil { - gotState = s.Attributes - } - - assert.Equal(t, want, gotState) - }) - } -} diff --git a/ec/ecresource/extensionresource/read.go b/ec/ecresource/extensionresource/read.go index 05624fc4f..b7372c725 100644 --- a/ec/ecresource/extensionresource/read.go +++ b/ec/ecresource/extensionresource/read.go @@ -1,57 +1,60 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- package extensionresource import ( "context" "errors" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/extensionapi" "github.com/elastic/cloud-sdk-go/pkg/client/extensions" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" ) -func readResource(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) +func (r *Resource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + if !resourceReady(r, &response.Diagnostics) { + return + } + + var newState modelV0 + + diags := request.State.Get(ctx, &newState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + found, diags := r.read(newState.ID.Value, &newState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + if !found { + response.State.RemoveResource(ctx) + return + } + + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) 
+} +func (r *Resource) read(id string, state *modelV0) (found bool, diags diag.Diagnostics) { res, err := extensionapi.Get(extensionapi.GetParams{ - API: client, - ExtensionID: d.Id(), + API: r.client, + ExtensionID: id, }) if err != nil { if extensionNotFound(err) { - d.SetId("") - return nil + return false, diags } - - return diag.FromErr(multierror.NewPrefixed("failed reading extension", err)) - } - - if err := modelToState(d, res); err != nil { - return diag.FromErr(err) + diags.AddError("failed reading extension", err.Error()) + return true, diags } - return nil + modelToState(res, state) + return true, diags } func extensionNotFound(err error) bool { @@ -61,40 +64,40 @@ func extensionNotFound(err error) bool { return errors.As(err, &extensionNotFound) } -func modelToState(d *schema.ResourceData, model *models.Extension) error { - if err := d.Set("name", model.Name); err != nil { - return err +func modelToState(model *models.Extension, state *modelV0) { + if model.Name != nil { + state.Name = types.String{Value: *model.Name} + } else { + state.Name = types.String{Null: true} } - if err := d.Set("version", model.Version); err != nil { - return err + if model.Version != nil { + state.Version = types.String{Value: *model.Version} + } else { + state.Version = types.String{Null: true} } - if err := d.Set("extension_type", model.ExtensionType); err != nil { - return err + if model.ExtensionType != nil { + state.ExtensionType = types.String{Value: *model.ExtensionType} + } else { + state.ExtensionType = types.String{Null: true} } - if err := d.Set("description", model.Description); err != nil { - return err - } + state.Description = types.String{Value: model.Description} - if err := d.Set("url", model.URL); err != nil { - return err + if model.URL != nil { + state.URL = types.String{Value: *model.URL} + } else { + state.URL = types.String{Null: true} } - if err := d.Set("download_url", model.DownloadURL); err != nil { - return err - } + state.DownloadURL = 
types.String{Value: model.DownloadURL} - if filemeta := model.FileMetadata; filemeta != nil { - if err := d.Set("last_modified", filemeta.LastModifiedDate.String()); err != nil { - return err - } - - if err := d.Set("size", filemeta.Size); err != nil { - return err - } + if metadata := model.FileMetadata; metadata != nil { + state.LastModified = types.String{Value: metadata.LastModifiedDate.String()} + state.Size = types.Int64{Value: metadata.Size} + } else { + state.LastModified = types.String{Null: true} + state.Size = types.Int64{Null: true} } - - return nil } diff --git a/ec/ecresource/extensionresource/read_test.go b/ec/ecresource/extensionresource/read_test.go deleted file mode 100644 index 827518986..000000000 --- a/ec/ecresource/extensionresource/read_test.go +++ /dev/null @@ -1,152 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package extensionresource - -import ( - "context" - "testing" - - "github.com/go-openapi/strfmt" - "github.com/stretchr/testify/assert" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_readResource(t *testing.T) { - tc200 := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - wantTC200 := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - - tc500Err := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - wantTC500 := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - - tc404Err := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - wantTC404 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newExtension(), - Schema: newSchema(), - }) - wantTC404.SetId("") - - lastModified, _ := strfmt.ParseDateTime("2021-01-07T22:13:42.999Z") - type args struct { - ctx context.Context - d *schema.ResourceData - meta interface{} - } - tests := []struct { - name string - args args - want diag.Diagnostics - wantRD *schema.ResourceData - }{ - { - name: "returns nil when it receives a 200", - args: args{ - d: tc200, - meta: api.NewMock(mock.New200StructResponse(models.Extension{ - Name: ec.String("my_extension"), - ExtensionType: ec.String("bundle"), - Description: "my description", - Version: ec.String("*"), - DownloadURL: "https://example.com", - URL: ec.String("repo://1234"), - FileMetadata: 
&models.ExtensionFileMetadata{ - LastModifiedDate: lastModified, - Size: 1000, - }, - })), - }, - want: nil, - wantRD: wantTC200, - }, - { - name: "returns an error when it receives a 500", - args: args{ - d: tc500Err, - meta: api.NewMock(mock.NewErrorResponse(500, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: diag.Diagnostics{ - { - Severity: diag.Error, - Summary: "failed reading extension: 1 error occurred:\n\t* api error: some: message\n\n", - }, - }, - wantRD: wantTC500, - }, - { - name: "returns nil and unsets the state when the error is known", - args: args{ - d: tc404Err, - meta: api.NewMock(mock.NewErrorResponse(404, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: nil, - wantRD: wantTC404, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := readResource(tt.args.ctx, tt.args.d, tt.args.meta) - assert.Equal(t, tt.want, got) - var want interface{} - if tt.wantRD != nil { - if s := tt.wantRD.State(); s != nil { - want = s.Attributes - } - } - - var gotState interface{} - if s := tt.args.d.State(); s != nil { - gotState = s.Attributes - } - - assert.Equal(t, want, gotState) - }) - } -} diff --git a/ec/ecresource/extensionresource/resource.go b/ec/ecresource/extensionresource/resource.go deleted file mode 100644 index 42f7414a4..000000000 --- a/ec/ecresource/extensionresource/resource.go +++ /dev/null @@ -1,45 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package extensionresource - -import ( - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// Resource returns the ec_deployment_extension resource schema. -func Resource() *schema.Resource { - return &schema.Resource{ - Description: "Elastic Cloud extension (plugin or bundle) to enhance the core functionality of Elasticsearch. Before you install an extension, be sure to check out the supported and official Elasticsearch plugins already available", - Schema: newSchema(), - - CreateContext: createResource, - ReadContext: readResource, - UpdateContext: updateResource, - DeleteContext: deleteResource, - - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(10 * time.Minute), - }, - } -} diff --git a/ec/ecresource/extensionresource/resource_test.go b/ec/ecresource/extensionresource/resource_test.go new file mode 100644 index 000000000..9a4cb8b1a --- /dev/null +++ b/ec/ecresource/extensionresource/resource_test.go @@ -0,0 +1,416 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package extensionresource_test + +import ( + "net/http" + "net/url" + "regexp" + "testing" + + "github.com/go-openapi/strfmt" + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + r "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + + provider "github.com/elastic/terraform-provider-ec/ec" +) + +func TestResourceDeploymentExtension(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse(), + readResponse1(), + readResponse1(), + readResponse1(), + updateResponse(), + + // Not testing for assertion as the content type is multipart/form-data + // with a boundary that is a randomly generated string which changes every time. 
+ mock.Response{ + Response: http.Response{ + StatusCode: http.StatusOK, + Status: http.StatusText(http.StatusOK), + Body: mock.NewStringBody("{}"), + }, + }, + + readResponse2(), + readResponse2(), + readResponse2(), + deleteResponse(), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: deploymentExtension1, + Check: checkResource1(), + }, + { // Update resource + Config: deploymentExtension2, + Check: checkResource2(), + }, + { // Delete resource + Destroy: true, + Config: deploymentExtension2, + }, + }, + }) +} + +func TestResourceDeploymentExtension_failedCreate(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + mock.New500Response(mock.SampleInternalError().Response.Body), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: deploymentExtension1, + ExpectError: regexp.MustCompile(`internal.server.error: There was an internal server error`), + }, + }, + }) +} + +func TestResourceDeploymentExtension_failedReadAfterCreate(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse(), + mock.New500Response(mock.SampleInternalError().Response.Body), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: deploymentExtension1, + ExpectError: regexp.MustCompile(`internal.server.error: There was an internal server error`), + }, + }, + }) +} + +func TestResourceDeploymentExtension_notFoundAfterCreate(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse(), + mock.New404Response(mock.NewStringBody(`{ }`)), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: deploymentExtension1, + ExpectError: regexp.MustCompile(`Failed to read deployment extension after create.`), + }, + }, + }) +} + +func TestResourceDeploymentExtension_failedUpdate(t *testing.T) { + + 
r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse(), + readResponse1(), + readResponse1(), + readResponse1(), + mock.New500Response(mock.SampleInternalError().Response.Body), + deleteResponse(), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: deploymentExtension1, + Check: checkResource1(), + }, + { // Update resource + Config: deploymentExtension2, + Check: checkResource2(), + ExpectError: regexp.MustCompile(`internal.server.error: There was an internal server error`), + }, + }, + }) +} + +func TestResourceDeploymentExtension_notFoundAfterUpdate(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse(), + readResponse1(), + readResponse1(), + readResponse1(), + updateResponse(), + mock.Response{ + Response: http.Response{ + StatusCode: http.StatusOK, + Status: http.StatusText(http.StatusOK), + Body: mock.NewStringBody("{}"), + }, + }, + mock.New404Response(mock.NewStringBody(`{ }`)), + deleteResponse(), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: deploymentExtension1, + Check: checkResource1(), + }, + { // Update resource + Config: deploymentExtension2, + Check: checkResource2(), + ExpectError: regexp.MustCompile(`Failed to read deployment extension after update.`), + }, + }, + }) +} + +func TestResourceDeploymentExtension_failedDelete(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse(), + readResponse1(), + readResponse1(), + readResponse1(), + mock.New500Response(mock.SampleInternalError().Response.Body), + deleteResponse(), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: deploymentExtension1, + Check: checkResource1(), + }, + { // Delete resource + Destroy: true, + Config: deploymentExtension2, + ExpectError: 
regexp.MustCompile(`internal.server.error: There was an internal server error`), + }, + }, + }) +} + +func TestResourceDeploymentExtension_gracefulDeletion(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + createResponse(), + readResponse1(), + readResponse1(), + readResponse1(), + mock.New404ResponseAssertion( + &mock.RequestAssertion{ + Header: api.DefaultReadMockHeaders, + Method: "DELETE", + Host: api.DefaultMockHost, + Path: "/api/v1/deployments/extensions/someid", + }, + mock.NewStructBody(models.Extension{ + ID: ec.String("{ }"), + }, + ), + ), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: deploymentExtension1, + Check: checkResource1(), + }, + { // Delete resource + Destroy: true, + Config: deploymentExtension2, + }, + }, + }) +} + +const deploymentExtension1 = ` +resource "ec_deployment_extension" "my_extension" { + name = "My extension" + description = "Some description" + version = "*" + extension_type = "bundle" +} +` +const deploymentExtension2 = ` +resource "ec_deployment_extension" "my_extension" { + name = "My updated extension" + description = "Some updated description" + version = "7.10.1" + extension_type = "bundle" + download_url = "https://example.com" + file_path = "testdata/test_extension_bundle.json" + file_hash = "abcd" +} +` + +func checkResource1() r.TestCheckFunc { + resource := "ec_deployment_extension.my_extension" + return r.ComposeAggregateTestCheckFunc( + r.TestCheckResourceAttr(resource, "id", "someid"), + r.TestCheckResourceAttr(resource, "name", "My extension"), + r.TestCheckResourceAttr(resource, "description", "Some description"), + r.TestCheckResourceAttr(resource, "version", "*"), + r.TestCheckResourceAttr(resource, "extension_type", "bundle"), + ) +} + +func checkResource2() r.TestCheckFunc { + resource := "ec_deployment_extension.my_extension" + return r.ComposeAggregateTestCheckFunc( + r.TestCheckResourceAttr(resource, 
"id", "someid"), + r.TestCheckResourceAttr(resource, "name", "My updated extension"), + r.TestCheckResourceAttr(resource, "description", "Some updated description"), + r.TestCheckResourceAttr(resource, "version", "7.10.1"), + r.TestCheckResourceAttr(resource, "extension_type", "bundle"), + r.TestCheckResourceAttr(resource, "download_url", "https://example.com"), + r.TestCheckResourceAttr(resource, "url", "repo://1234"), + r.TestCheckResourceAttr(resource, "last_modified", "2021-01-07T22:13:42.999Z"), + r.TestCheckResourceAttr(resource, "size", "1000"), + r.TestCheckResourceAttr(resource, "file_path", "testdata/test_extension_bundle.json"), + r.TestCheckResourceAttr(resource, "file_hash", "abcd"), + ) +} + +func createResponse() mock.Response { + return mock.New201ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultWriteMockHeaders, + Method: "POST", + Path: "/api/v1/deployments/extensions", + Query: url.Values{}, + Body: mock.NewStringBody(`{"description":"Some description","extension_type":"bundle","name":"My extension","version":"*"}` + "\n"), + }, + mock.NewStringBody(`{"deployments":null,"description":"Some description","download_url":null,"extension_type":"bundle","id":"someid","name":"My extension","url":null,"version":"*"}`), + ) +} + +func updateResponse() mock.Response { + return mock.New200ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultWriteMockHeaders, + Method: "POST", + Path: "/api/v1/deployments/extensions/someid", + Query: url.Values{}, + Body: mock.NewStringBody(`{"description":"Some updated description","download_url":"https://example.com","extension_type":"bundle","name":"My updated extension","version":"7.10.1"}` + "\n"), + }, + mock.NewStructBody(models.Extension{ + ID: ec.String("someid"), + Name: ec.String("My updated extension"), + Description: "Some updated description", + ExtensionType: ec.String("bundle"), + Version: ec.String("7.10.1"), + 
DownloadURL: "https://example.com", + URL: ec.String("repo://1234"), + FileMetadata: &models.ExtensionFileMetadata{ + LastModifiedDate: lastModified(), + Size: 1000, + }, + })) +} + +func deleteResponse() mock.Response { + return mock.New200ResponseAssertion( + &mock.RequestAssertion{ + Header: api.DefaultReadMockHeaders, + Method: "DELETE", + Host: api.DefaultMockHost, + Path: "/api/v1/deployments/extensions/someid", + }, + mock.NewStructBody(models.Extension{ + ID: ec.String("someid"), + }), + ) +} + +func readResponse1() mock.Response { + return mock.New200ResponseAssertion( + &mock.RequestAssertion{ + Header: api.DefaultReadMockHeaders, + Method: "GET", + Host: api.DefaultMockHost, + Path: "/api/v1/deployments/extensions/someid", + Query: url.Values{"include_deployments": {"false"}}, + }, + mock.NewStructBody(models.Extension{ + ID: ec.String("someid"), + Name: ec.String("My extension"), + Description: "Some description", + ExtensionType: ec.String("bundle"), + Version: ec.String("*"), + }), + ) +} +func readResponse2() mock.Response { + return mock.New200ResponseAssertion( + &mock.RequestAssertion{ + Header: api.DefaultReadMockHeaders, + Method: "GET", + Host: api.DefaultMockHost, + Path: "/api/v1/deployments/extensions/someid", + Query: url.Values{"include_deployments": {"false"}}, + }, + mock.NewStructBody(models.Extension{ + ID: ec.String("someid"), + Name: ec.String("My updated extension"), + Description: "Some updated description", + ExtensionType: ec.String("bundle"), + Version: ec.String("7.10.1"), + DownloadURL: "https://example.com", + URL: ec.String("repo://1234"), + FileMetadata: &models.ExtensionFileMetadata{ + LastModifiedDate: lastModified(), + Size: 1000, + }, + }), + ) +} + +func lastModified() strfmt.DateTime { + lastModified, _ := strfmt.ParseDateTime("2021-01-07T22:13:42.999Z") + return lastModified +} + +func protoV6ProviderFactoriesWithMockClient(client *api.API) map[string]func() (tfprotov6.ProviderServer, error) { + return 
map[string]func() (tfprotov6.ProviderServer, error){ + "ec": func() (tfprotov6.ProviderServer, error) { + return providerserver.NewProtocol6(provider.ProviderWithClient(client, "unit-tests"))(), nil + }, + } +} diff --git a/ec/ecresource/extensionresource/schema.go b/ec/ecresource/extensionresource/schema.go index ee3ae22cb..7d323772a 100644 --- a/ec/ecresource/extensionresource/schema.go +++ b/ec/ecresource/extensionresource/schema.go @@ -18,64 +18,151 @@ package extensionresource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "context" + + "github.com/hashicorp/terraform-plugin-framework-validators/resourcevalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + + "github.com/elastic/terraform-provider-ec/ec/internal" ) -func newSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Description: "Required name of the ruleset", - Required: true, - }, - "description": { - Type: schema.TypeString, - Description: "Description for extension", - Optional: true, - }, - "extension_type": { - Type: schema.TypeString, - Description: "Extension type. 
bundle or plugin", - Required: true, - }, - "version": { - Type: schema.TypeString, - Description: "Eleasticsearch version", - Required: true, - }, - "download_url": { - Type: schema.TypeString, - Description: "download url", - Optional: true, - }, +// Ensure provider defined types fully satisfy framework interfaces +var _ resource.Resource = &Resource{} +var _ resource.ResourceWithConfigure = &Resource{} +var _ resource.ResourceWithImportState = &Resource{} +var _ resource.ResourceWithConfigValidators = &Resource{} - // Uploading file via API - "file_path": { - Type: schema.TypeString, - Description: "file path", - Optional: true, - RequiredWith: []string{"file_hash"}, - }, - "file_hash": { - Type: schema.TypeString, - Description: "file hash", - Optional: true, - }, +func (r *Resource) GetSchema(_ context.Context) (tfsdk.Schema, diag.Diagnostics) { + return tfsdk.Schema{ + Attributes: map[string]tfsdk.Attribute{ + "name": { + Type: types.StringType, + Description: "Required name of the ruleset", + Required: true, + }, + "description": { + Type: types.StringType, + Description: "Description for extension", + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: ""}), + }}, + "extension_type": { + Type: types.StringType, + Description: "Extension type. 
bundle or plugin", + Required: true, + }, + "version": { + Type: types.StringType, + Description: "Elasticsearch version", + Required: true, + }, + "download_url": { + Type: types.StringType, + Description: "download url", + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: ""}), + }, + }, - "url": { - Type: schema.TypeString, - Description: "", - Computed: true, - }, - "last_modified": { - Type: schema.TypeString, - Description: "", - Computed: true, - }, - "size": { - Type: schema.TypeInt, - Description: "", - Computed: true, + // Uploading file via API + "file_path": { + Type: types.StringType, + Description: "file path", + Optional: true, + }, + "file_hash": { + Type: types.StringType, + Description: "file hash", + Optional: true, + }, + "url": { + Type: types.StringType, + Description: "", + Computed: true, + }, + "last_modified": { + Type: types.StringType, + Description: "", + Computed: true, + }, + "size": { + Type: types.Int64Type, + Description: "", + Computed: true, + }, + // Computed attributes + "id": { + Type: types.StringType, + Computed: true, + MarkdownDescription: "Unique identifier of this resource.", + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, }, + }, nil +} + +func (r *Resource) ConfigValidators(ctx context.Context) []resource.ConfigValidator { + return []resource.ConfigValidator{ + resourcevalidator.RequiredTogether( + path.MatchRoot("file_path"), + path.MatchRoot("file_hash"), + ), } } + +type Resource struct { + client *api.API +} + +func (r *Resource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("id"), request.ID)...) 
+} + +func resourceReady(r *Resource, dg *diag.Diagnostics) bool { + if r.client == nil { + dg.AddError( + "Unconfigured API Client", + "Expected configured API client. Please report this issue to the provider developers.", + ) + + return false + } + return true +} + +func (r *Resource) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) { + client, diags := internal.ConvertProviderData(request.ProviderData) + response.Diagnostics.Append(diags...) + r.client = client +} + +func (r *Resource) Metadata(ctx context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = request.ProviderTypeName + "_deployment_extension" +} + +type modelV0 struct { + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + ExtensionType types.String `tfsdk:"extension_type"` + Version types.String `tfsdk:"version"` + DownloadURL types.String `tfsdk:"download_url"` + FilePath types.String `tfsdk:"file_path"` + FileHash types.String `tfsdk:"file_hash"` + URL types.String `tfsdk:"url"` + LastModified types.String `tfsdk:"last_modified"` + Size types.Int64 `tfsdk:"size"` +} diff --git a/ec/ecresource/extensionresource/testutil_datastruct.go b/ec/ecresource/extensionresource/testutil_datastruct.go deleted file mode 100644 index 61a79ccdb..000000000 --- a/ec/ecresource/extensionresource/testutil_datastruct.go +++ /dev/null @@ -1,47 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package extensionresource - -func newExtension() map[string]interface{} { - return map[string]interface{}{ - "name": "my_extension", - "extension_type": "bundle", - "description": "my description", - "version": "*", - "download_url": "https://example.com", - "url": "repo://1234", - "last_modified": "2021-01-07T22:13:42.999Z", - "size": 1000, - } -} - -func newExtensionWithFilePath() map[string]interface{} { - return map[string]interface{}{ - "name": "my_extension", - "extension_type": "bundle", - "description": "my description", - "version": "*", - "download_url": "https://example.com", - "url": "repo://1234", - "last_modified": "2021-01-07T22:13:42.999Z", - "size": 1000, - - "file_path": "testdata/test_extension_bundle.json", - "file_hash": "abcd", - } -} diff --git a/ec/ecresource/extensionresource/update.go b/ec/ecresource/extensionresource/update.go index 613f0a780..1cb3b37e9 100644 --- a/ec/ecresource/extensionresource/update.go +++ b/ec/ecresource/extensionresource/update.go @@ -1,72 +1,74 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - package extensionresource import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/extensionapi" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" ) -func updateResource(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) +func (r *Resource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + if !resourceReady(r, &response.Diagnostics) { + return + } - _, err := updateRequest(client, d) - if err != nil { - return diag.FromErr(err) + var oldState modelV0 + var newState modelV0 + + diags := request.State.Get(ctx, &oldState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return } - if _, ok := d.GetOk("file_path"); ok && d.HasChanges("file_hash", "last_modified", "size") { - if err := uploadExtension(client, d); err != nil { - return diag.FromErr(multierror.NewPrefixed("failed to upload file", err)) - } + diags = request.Plan.Get(ctx, &newState) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { + return } - return readResource(ctx, d, meta) -} + _, err := extensionapi.Update( + extensionapi.UpdateParams{ + API: r.client, + ExtensionID: newState.ID.Value, + Name: newState.Name.Value, + Version: newState.Version.Value, + Type: newState.ExtensionType.Value, + Description: newState.Description.Value, + DownloadURL: newState.DownloadURL.Value, + }, + ) + if err != nil { + response.Diagnostics.AddError(err.Error(), err.Error()) + return + } -func updateRequest(client *api.API, d *schema.ResourceData) (*models.Extension, error) { - name := d.Get("name").(string) - version := d.Get("version").(string) - extensionType := d.Get("extension_type").(string) - description := d.Get("description").(string) - downloadURL := d.Get("download_url").(string) + hasChanges := !oldState.FileHash.Equal(newState.FileHash) || + !oldState.LastModified.Equal(newState.LastModified) || + !oldState.Size.Equal(newState.Size) - body := extensionapi.UpdateParams{ - API: client, - ExtensionID: d.Id(), - Name: name, - Version: version, - Type: extensionType, - Description: description, - DownloadURL: downloadURL, + if !newState.FilePath.IsNull() && newState.FilePath.Value != "" && hasChanges { + response.Diagnostics.Append(r.uploadExtension(newState)...) + if response.Diagnostics.HasError() { + return + } } - res, err := extensionapi.Update(body) - if err != nil { - return nil, err + found, diags := r.read(newState.ID.Value, &newState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + if !found { + response.Diagnostics.AddError( + "Failed to read deployment extension after update.", + "Failed to read deployment extension after update.", + ) + response.State.RemoveResource(ctx) + return } - return res, nil + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) 
} diff --git a/ec/ecresource/extensionresource/update_test.go b/ec/ecresource/extensionresource/update_test.go deleted file mode 100644 index 6c42b610f..000000000 --- a/ec/ecresource/extensionresource/update_test.go +++ /dev/null @@ -1,194 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package extensionresource - -import ( - "context" - "testing" - - "github.com/go-openapi/strfmt" - "github.com/stretchr/testify/assert" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_updateResource(t *testing.T) { - tc200withoutFilePath := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - - wantTC200statewithoutFilePath := newExtension() - wantTC200statewithoutFilePath["name"] = "updated_extension" - wantTC200withoutFilePath := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: wantTC200statewithoutFilePath, - Schema: newSchema(), - }) - - tc200withFilePath := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtensionWithFilePath(), - Schema: newSchema(), - }) - wantTC200statewithFilePath := newExtensionWithFilePath() - wantTC200statewithFilePath["name"] = "updated_extension" - wantTC200withFilePath := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: wantTC200statewithFilePath, - Schema: newSchema(), - }) - - tc500Err := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - wantTC500 := util.NewResourceData(t, util.ResDataParams{ - ID: "12345678", - State: newExtension(), - Schema: newSchema(), - }) - - lastModified, _ := strfmt.ParseDateTime("2021-01-07T22:13:42.999Z") - type args struct { - ctx context.Context - d *schema.ResourceData - meta interface{} - } - tests := []struct { - name string - args args - want diag.Diagnostics - wantRD *schema.ResourceData - }{ - { - name: "returns nil when it receives a 200 without file_path", - 
args: args{ - d: tc200withoutFilePath, - meta: api.NewMock( - mock.New200StructResponse(models.Extension{ // update request response - Name: ec.String("updated_extension"), - ExtensionType: ec.String("bundle"), - Description: "my description", - Version: ec.String("*"), - DownloadURL: "https://example.com", - URL: ec.String("repo://1234"), - FileMetadata: &models.ExtensionFileMetadata{ - LastModifiedDate: lastModified, - Size: 1000, - }, - }), - mock.New200StructResponse(models.Extension{ // read request response - Name: ec.String("updated_extension"), - ExtensionType: ec.String("bundle"), - Description: "my description", - Version: ec.String("*"), - DownloadURL: "https://example.com", - URL: ec.String("repo://1234"), - FileMetadata: &models.ExtensionFileMetadata{ - LastModifiedDate: lastModified, - Size: 1000, - }, - }), - ), - }, - want: nil, - wantRD: wantTC200withoutFilePath, - }, - { - name: "returns nil when it receives a 200 with file_path", - args: args{ - d: tc200withFilePath, - meta: api.NewMock( - mock.New200StructResponse(models.Extension{ // update request response - Name: ec.String("updated_extension"), - ExtensionType: ec.String("bundle"), - Description: "my description", - Version: ec.String("*"), - DownloadURL: "https://example.com", - URL: ec.String("repo://1234"), - FileMetadata: &models.ExtensionFileMetadata{ - LastModifiedDate: lastModified, - Size: 1000, - }, - }), - mock.New200StructResponse(nil), // upload request response - mock.New200StructResponse(models.Extension{ // read request response - Name: ec.String("updated_extension"), - ExtensionType: ec.String("bundle"), - Description: "my description", - Version: ec.String("*"), - DownloadURL: "https://example.com", - URL: ec.String("repo://1234"), - FileMetadata: &models.ExtensionFileMetadata{ - LastModifiedDate: lastModified, - Size: 1000, - }, - }), - ), - }, - want: nil, - wantRD: wantTC200withFilePath, - }, - { - name: "returns an error when it receives a 500", - args: args{ - d: 
tc500Err, - meta: api.NewMock(mock.NewErrorResponse(500, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: diag.Diagnostics{ - { - Severity: diag.Error, - Summary: "api error: 1 error occurred:\n\t* some: message\n\n", - }, - }, - wantRD: wantTC500, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := updateResource(tt.args.ctx, tt.args.d, tt.args.meta) - assert.Equal(t, tt.want, got) - var want interface{} - if tt.wantRD != nil { - if s := tt.wantRD.State(); s != nil { - want = s.Attributes - } - } - - var gotState interface{} - if s := tt.args.d.State(); s != nil { - gotState = s.Attributes - } - - assert.Equal(t, want, gotState) - }) - } -} diff --git a/ec/ecresource/extensionresource/upload.go b/ec/ecresource/extensionresource/upload.go index d05851575..a3e53d36e 100644 --- a/ec/ecresource/extensionresource/upload.go +++ b/ec/ecresource/extensionresource/upload.go @@ -20,28 +20,29 @@ package extensionresource import ( "os" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/extensionapi" - "github.com/elastic/cloud-sdk-go/pkg/multierror" ) -func uploadExtension(client *api.API, d *schema.ResourceData) error { - filePath := d.Get("file_path").(string) - reader, err := os.Open(filePath) +func (r *Resource) uploadExtension(state modelV0) diag.Diagnostics { + var diags diag.Diagnostics + + reader, err := os.Open(state.FilePath.Value) if err != nil { - return multierror.NewPrefixed("failed to open file", err) + diags.AddError("failed to open file", err.Error()) + return diags } _, err = extensionapi.Upload(extensionapi.UploadParams{ - API: client, - ExtensionID: d.Id(), + API: r.client, + ExtensionID: state.ID.Value, File: reader, }) if err != nil { - return err + diags.AddError("failed to upload file", err.Error()) + return diags } - return nil + 
return diags } diff --git a/ec/ecresource/trafficfilterresource/create.go b/ec/ecresource/trafficfilterresource/create.go index bc3f8614f..25c05136a 100644 --- a/ec/ecresource/trafficfilterresource/create.go +++ b/ec/ecresource/trafficfilterresource/create.go @@ -59,7 +59,12 @@ func (r Resource) Create(ctx context.Context, request resource.CreateRequest, re found, diags := r.read(ctx, newState.ID.Value, &newState) response.Diagnostics.Append(diags...) if !found { + response.Diagnostics.AddError( + "Failed to read deployment traffic filter ruleset after create.", + "Failed to read deployment traffic filter ruleset after create.", + ) response.State.RemoveResource(ctx) + return } if response.Diagnostics.HasError() { return diff --git a/ec/ecresource/trafficfilterresource/expanders.go b/ec/ecresource/trafficfilterresource/expanders.go index a60a9721d..a4f07d0d4 100644 --- a/ec/ecresource/trafficfilterresource/expanders.go +++ b/ec/ecresource/trafficfilterresource/expanders.go @@ -23,7 +23,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) func expandModel(ctx context.Context, state modelV0) (*models.TrafficFilterRulesetRequest, diag.Diagnostics) { @@ -36,11 +35,11 @@ func expandModel(ctx context.Context, state modelV0) (*models.TrafficFilterRules } var request = models.TrafficFilterRulesetRequest{ - Name: ec.String(state.Name.Value), - Type: ec.String(state.Type.Value), - Region: ec.String(state.Region.Value), + Name: &state.Name.Value, + Type: &state.Type.Value, + Region: &state.Region.Value, Description: state.Description.Value, - IncludeByDefault: ec.Bool(state.IncludeByDefault.Value), + IncludeByDefault: &state.IncludeByDefault.Value, Rules: make([]*models.TrafficFilterRule, 0, len(ruleSet)), } diff --git a/ec/ecresource/trafficfilterresource/resource_test.go b/ec/ecresource/trafficfilterresource/resource_test.go index ac39c0cdd..beb22c8ec 100644 --- 
a/ec/ecresource/trafficfilterresource/resource_test.go +++ b/ec/ecresource/trafficfilterresource/resource_test.go @@ -125,7 +125,7 @@ func TestResourceTrafficFilter_failedRead1(t *testing.T) { }) } -func TestResourceTrafficFilter_gracefulDeletionOnUpdate(t *testing.T) { +func TestResourceTrafficFilter_notFoundAfterUpdate(t *testing.T) { r.UnitTest(t, r.TestCase{ ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( api.NewMock( @@ -135,7 +135,7 @@ func TestResourceTrafficFilter_gracefulDeletionOnUpdate(t *testing.T) { readResponse("false", "true"), updateResponse("false"), notFoundReadResponse("false"), - notFoundReadResponse("false"), + notFoundReadResponse("true"), ), ), Steps: []r.TestStep{ @@ -144,9 +144,8 @@ func TestResourceTrafficFilter_gracefulDeletionOnUpdate(t *testing.T) { Check: checkResource("true"), }, { // Update resource - Config: trafficFilterWithoutIncludeByDefault, - Check: checkResource("false"), // Update can't remove the resource, so it should stay the same. - ExpectNonEmptyPlan: true, // terraform refresh will detect the removed resource, so we will end up with a non-empty plan. + Config: trafficFilterWithoutIncludeByDefault, + ExpectError: regexp.MustCompile(`Failed to read deployment traffic filter ruleset after update.`), }, }, }) diff --git a/ec/ecresource/trafficfilterresource/update.go b/ec/ecresource/trafficfilterresource/update.go index fdbe03cde..50af50f15 100644 --- a/ec/ecresource/trafficfilterresource/update.go +++ b/ec/ecresource/trafficfilterresource/update.go @@ -40,6 +40,7 @@ func (r Resource) Update(ctx context.Context, request resource.UpdateRequest, re } trafficFilterRulesetRequest, diags := expandModel(ctx, newState) + response.Diagnostics.Append(diags...) 
_, err := trafficfilterapi.Update(trafficfilterapi.UpdateParams{ API: r.client, ID: newState.ID.Value, Req: trafficFilterRulesetRequest, @@ -55,12 +56,12 @@ func (r Resource) Update(ctx context.Context, request resource.UpdateRequest, re return } if !found { - // We can't unset the state here, and must make sure to set the state according to the plan below. - // So all we do is add a warning. - diags.AddWarning( - "Failed to read traffic filter rule.", - "Please run terraform refresh to ensure a consistent state.", + response.Diagnostics.AddError( + "Failed to read deployment traffic filter ruleset after update.", + "Failed to read deployment traffic filter ruleset after update.", ) + response.State.RemoveResource(ctx) + return } // Finally, set the state diff --git a/ec/internal/util/helpers.go b/ec/internal/util/helpers.go index 4abc5b5c7..960ab8c19 100644 --- a/ec/internal/util/helpers.go +++ b/ec/internal/util/helpers.go @@ -132,6 +132,7 @@ func StringListAsType(in []string) types.List { } return types.List{ElemType: types.StringType, Elems: out} } + func StringMapAsType(in map[string]string) types.Map { //goland:noinspection GoPreferNilSlice out := make(map[string]attr.Value, len(in)) diff --git a/ec/provider.go b/ec/provider.go index d076cc8af..3c243cf07 100644 --- a/ec/provider.go +++ b/ec/provider.go @@ -78,8 +78,7 @@ func LegacyProvider() *schema.Provider { Schema: newSchema(), DataSourcesMap: map[string]*schema.Resource{}, ResourcesMap: map[string]*schema.Resource{ - "ec_deployment": deploymentresource.Resource(), - "ec_deployment_extension": extensionresource.Resource(), + "ec_deployment": deploymentresource.Resource(), }, } } @@ -170,6 +169,7 @@ func (p *Provider) DataSources(ctx context.Context) []func() datasource.DataSour func (p *Provider) Resources(ctx context.Context) []func() resource.Resource { return []func() resource.Resource{ func() resource.Resource { return &elasticsearchkeystoreresource.Resource{} }, + func() resource.Resource { return 
&extensionresource.Resource{} }, func() resource.Resource { return &trafficfilterresource.Resource{} }, func() resource.Resource { return &trafficfilterassocresource.Resource{} }, } From 320fb77ee03329be71c85d182447d6ae0ff044bc Mon Sep 17 00:00:00 2001 From: Dmitry Onishchenko <8962171+dimuon@users.noreply.github.com> Date: Wed, 7 Dec 2022 10:41:08 +0100 Subject: [PATCH 012/104] Feature/530/migrate to plugin framework (#2) --- .changelog/507.txt | 3 - .changelog/547.txt | 3 + .github/workflows/branch.yml | 2 +- CHANGELOG.md | 16 +- Makefile | 2 +- NOTICE | 22 +- README.md | 128 +- build/Makefile.deps | 2 +- build/Makefile.test | 4 +- build/Makefile.tools | 2 +- .../ec_aws_privatelink_endpoint.md | 29 + .../ec_azure_privatelink_endpoint.md | 28 + ...ec_gcp_private_service_connect_endpoint.md | 28 + docs/guides/configuring-sso-ec-deployment.md | 26 +- docs/resources/ec_deployment.md | 118 +- .../ec_deployment_elasticsearch_keystore.md | 6 +- .../resources/ec_deployment_traffic_filter.md | 26 +- ec/acc/acc_prereq.go | 16 +- ec/acc/datasource_deployment_basic_test.go | 102 +- ec/acc/deployment_autoscaling_test.go | 99 +- ec/acc/deployment_basic_defaults_test.go | 190 +- ec/acc/deployment_basic_tags_test.go | 92 +- ec/acc/deployment_basic_test.go | 83 +- ec/acc/deployment_ccs_test.go | 113 +- ec/acc/deployment_compute_optimized_test.go | 70 +- ec/acc/deployment_dedicated_test.go | 109 +- .../deployment_docker_image_override_test.go | 8 +- ...deployment_elasticsearch_keystore_test.go} | 5 +- ec/acc/deployment_emptyconf_test.go | 10 +- ec/acc/deployment_enterprise_search_test.go | 78 +- .../deployment_failed_upgrade_retry_test.go | 2 +- ec/acc/deployment_hotwarm_test.go | 76 +- ec/acc/deployment_integrations_server_test.go | 32 +- ec/acc/deployment_memory_optimized_test.go | 70 +- ec/acc/deployment_observability_self_test.go | 6 +- ec/acc/deployment_observability_test.go | 29 +- ec/acc/deployment_observability_tpl_test.go | 75 +- 
.../deployment_post_node_role_upgrade_test.go | 58 +- ...deployment_pre_node_role_migration_test.go | 105 +- ec/acc/deployment_security_test.go | 70 +- ec/acc/deployment_snapshot_test.go | 2 +- ...loyment_traffic_filter_association_test.go | 2 +- .../deployment_with_extension_bundle_test.go | 9 +- .../testdata/datasource_deployment_basic.tf | 28 +- ec/acc/testdata/datasource_tags.tf | 10 +- ec/acc/testdata/deployment_autoscaling_1.tf | 35 +- ec/acc/testdata/deployment_autoscaling_2.tf | 35 +- ec/acc/testdata/deployment_basic.tf | 26 +- .../testdata/deployment_basic_defaults_1.tf | 14 +- .../testdata/deployment_basic_defaults_2.tf | 24 +- .../testdata/deployment_basic_defaults_3.tf | 10 +- .../deployment_basic_defaults_hw_1.tf | 8 +- .../deployment_basic_defaults_hw_2.tf | 13 +- .../deployment_basic_integrations_server_1.tf | 10 +- .../deployment_basic_integrations_server_2.tf | 14 +- .../deployment_basic_settings_config_1.tf | 26 +- .../deployment_basic_settings_config_2.tf | 37 +- ...deployment_basic_settings_config_import.tf | 56 + ec/acc/testdata/deployment_basic_tags_1.tf | 8 +- ec/acc/testdata/deployment_basic_tags_2.tf | 8 +- ec/acc/testdata/deployment_basic_tags_3.tf | 8 +- ec/acc/testdata/deployment_basic_tags_4.tf | 8 +- .../deployment_basic_with_traffic_filter_2.tf | 14 +- .../deployment_basic_with_traffic_filter_3.tf | 14 +- ec/acc/testdata/deployment_ccs_1.tf | 27 +- ec/acc/testdata/deployment_ccs_2.tf | 10 +- .../deployment_compute_optimized_1.tf | 8 +- .../deployment_compute_optimized_2.tf | 12 +- .../deployment_dedicated_coordinating.tf | 28 +- .../testdata/deployment_dedicated_master.tf | 37 +- .../deployment_docker_image_override.tf | 30 +- .../deployment_elasticsearch_keystore_1.tf | 10 +- ...deployment_elasticsearch_keystore_1_041.tf | 32 + ...yment_elasticsearch_keystore_1_migrated.tf | 32 + .../deployment_elasticsearch_keystore_2.tf | 10 +- .../deployment_elasticsearch_keystore_3.tf | 10 +- .../deployment_elasticsearch_keystore_4.tf | 10 +- 
ec/acc/testdata/deployment_emptyconfig.tf | 12 +- .../deployment_enterprise_search_1.tf | 10 +- .../deployment_enterprise_search_2.tf | 12 +- ec/acc/testdata/deployment_hotwarm_1.tf | 10 +- ec/acc/testdata/deployment_hotwarm_2.tf | 20 +- .../testdata/deployment_memory_optimized_1.tf | 8 +- .../testdata/deployment_memory_optimized_2.tf | 12 +- ec/acc/testdata/deployment_observability_1.tf | 22 +- ec/acc/testdata/deployment_observability_2.tf | 22 +- ec/acc/testdata/deployment_observability_3.tf | 22 +- ec/acc/testdata/deployment_observability_4.tf | 20 +- .../testdata/deployment_observability_self.tf | 20 +- .../deployment_observability_tpl_1.tf | 10 +- .../deployment_observability_tpl_2.tf | 12 +- .../deployment_post_node_roles_upgrade_1.tf | 10 +- .../deployment_post_node_roles_upgrade_2.tf | 10 +- .../deployment_pre_node_roles_migration_1.tf | 10 +- .../deployment_pre_node_roles_migration_2.tf | 10 +- .../deployment_pre_node_roles_migration_3.tf | 20 +- ec/acc/testdata/deployment_security_1.tf | 8 +- ec/acc/testdata/deployment_security_2.tf | 12 +- ec/acc/testdata/deployment_snapshot_1.tf | 8 +- ec/acc/testdata/deployment_snapshot_2.tf | 21 +- ...oyment_traffic_filter_association_basic.tf | 10 +- ...nt_traffic_filter_association_basic_041.tf | 35 + ...filter_association_basic_ignore_changes.tf | 10 +- ...traffic_filter_association_basic_update.tf | 10 +- ec/acc/testdata/deployment_upgrade_retry_1.tf | 12 +- ec/acc/testdata/deployment_upgrade_retry_2.tf | 12 +- .../deployment_with_extension_bundle_file.tf | 10 +- .../deploymentdatasource/datasource.go | 4 +- .../privatelinkdatasource/aws_datasource.go | 82 + .../aws_datasource_test.go | 87 + .../privatelinkdatasource/azure_datasource.go | 71 + .../azure_datasource_test.go | 86 + .../privatelinkdatasource/datasource.go | 102 + .../privatelinkdatasource/gcp_datasource.go | 71 + .../gcp_datasource_test.go | 86 + .../regionPrivateLinkMap.json | 315 + .../deploymentresource/apm/v1/apm.go | 47 + 
.../deploymentresource/apm/v1/apm_config.go | 42 + .../deploymentresource/apm/v1/schema.go | 184 + .../deploymentresource/apm/v2/apm.go | 199 + .../deploymentresource/apm/v2/apm_config.go | 109 + .../apm/v2/apm_payload_test.go | 260 + .../v2/apm_read_test.go} | 146 +- .../deploymentresource/apm/v2/apm_topology.go | 135 + .../deploymentresource/apm/v2/schema.go | 167 + .../deploymentresource/apm_expanders.go | 207 - .../deploymentresource/apm_expanders_test.go | 264 - .../deploymentresource/apm_flatteners.go | 154 - ec/ecresource/deploymentresource/create.go | 87 +- ec/ecresource/deploymentresource/delete.go | 119 +- .../deploymentresource/delete_test.go | 215 - .../deployment/v1/deployment.go | 71 + .../deployment/v1/schema.go | 133 + .../deployment/v2/deployment.go | 566 ++ .../v2/deployment_create_payload_test.go | 3160 ++++++++++ .../v2/deployment_parse_credentials_test.go | 87 + .../deployment/v2/deployment_read_test.go | 1568 +++++ .../v2/deployment_update_payload_test.go | 2066 +++++++ ...asticsearch_remote_cluster_payload_test.go | 180 + .../v2/partial_stapshot_strategy_test.go | 87 + .../deployment/v2/schema.go | 132 + .../v2}/traffic_filter_test.go | 45 +- .../deployment_not_found_test.go | 75 + .../deploymentresource/deployment_test.go | 197 + .../elasticsearch/v1/elasticsearch.go | 60 + .../elasticsearch/v1/elasticsearch_config.go | 42 + .../v1/elasticsearch_extension.go} | 34 +- .../v1/elasticsearch_remote_cluster.go | 38 + .../v1/elasticsearch_snapshot_source.go | 34 + .../v1/elasticsearch_strategy.go | 34 + .../v1/elasticsearch_topology.go | 54 + .../v1/elasticsearch_topology_autoscaling.go | 40 + .../v1/elasticsearch_topology_config.go | 40 + .../v1/elasticsearch_trust_account.go | 38 + .../v1/elasticsearch_trust_external.go | 36 + .../elasticsearch/v1/schema.go | 544 ++ .../elasticsearch/v2/elasticsearch.go | 413 ++ .../elasticsearch/v2/elasticsearch_config.go | 120 + .../v2/elasticsearch_extension.go | 136 + .../v2/elasticsearch_payload_test.go} | 
816 ++- .../v2/elasticsearch_read_test.go} | 241 +- .../v2/elasticsearch_remote_cluster.go | 118 + .../v2/elasticsearch_snapshot_source.go | 62 + .../v2/elasticsearch_topology.go | 394 ++ .../v2/elasticsearch_trust_account.go | 59 + .../v2/elasticsearch_trust_external.go | 168 + .../v2/node_roles_plan_modifier.go | 106 + .../v2/node_types_plan_modifier.go | 124 + .../elasticsearch/v2/schema.go | 479 ++ .../v2/topology_plan_modifier.go | 112 + .../elasticsearch_expanders.go | 655 --- .../elasticsearch_flatteners.go | 360 -- .../elasticsearch_remote_cluster_expanders.go | 80 - ...ticsearch_remote_cluster_expanders_test.go | 155 - .../enterprise_search_expanders.go | 211 - .../enterprise_search_expanders_test.go | 358 -- .../enterprise_search_flatteners.go | 149 - .../enterprisesearch/v1/enterprise_search.go | 46 + .../v1/enterprise_search_config.go | 40 + .../v1/enterprise_search_topology.go | 44 + .../enterprisesearch/v1/schema.go | 181 + .../enterprisesearch/v2/enterprise_search.go | 217 + .../v2/enterprise_search_config.go | 96 + .../v2/enterprise_search_payload_test.go | 338 ++ .../v2/enterprise_search_read_test.go} | 56 +- .../v2/enterprise_search_topology.go | 161 + .../enterprisesearch/v2/schema.go | 168 + ec/ecresource/deploymentresource/expanders.go | 374 -- .../deploymentresource/expanders_test.go | 5119 ----------------- .../deploymentresource/flatteners.go | 321 -- .../deploymentresource/flatteners_test.go | 1787 ------ ec/ecresource/deploymentresource/import.go | 76 - .../deploymentresource/import_test.go | 241 - .../integrations_server_expanders.go | 207 - .../integrations_server_expanders_test.go | 264 - .../integrations_server_flatteners.go | 154 - .../v1/integrations_server.go | 47 + .../v1/integrations_server_config.go | 42 + .../integrationsserver/v1/schema.go | 171 + .../v2/integrations_server.go | 190 + .../v2/integrations_server_config.go | 124 + .../v2/integrations_server_payload_test.go | 254 + .../v2/integrations_server_read_test.go} | 132 +- 
.../v2/integrations_server_topology.go | 139 + .../integrationsserver/v2/schema.go | 158 + .../deploymentresource/kibana/v1/kibana.go | 49 + .../kibana/v1/kibana_config.go | 40 + .../deploymentresource/kibana/v1/schema.go | 160 + .../deploymentresource/kibana/v2/kibana.go | 198 + .../kibana/v2/kibana_config.go | 98 + .../kibana/v2/kibana_payload_test.go | 250 + .../v2/kibana_read_test.go} | 114 +- .../kibana/v2/kibana_topology.go | 142 + .../deploymentresource/kibana/v2/schema.go | 147 + .../deploymentresource/kibana_expanders.go | 196 - .../kibana_expanders_test.go | 261 - .../deploymentresource/kibana_flatteners.go | 134 - .../deploymentresource/observability.go | 117 - .../observability/v1/observability.go | 38 + .../observability/v1/schema.go | 66 + .../observability/v2/observability.go | 130 + .../v2/observability_payload_test.go} | 171 +- .../v2/observability_read_test.go | 127 + .../observability/v2/schema.go | 64 + ec/ecresource/deploymentresource/read.go | 157 +- ec/ecresource/deploymentresource/read_test.go | 199 - ec/ecresource/deploymentresource/resource.go | 74 +- ec/ecresource/deploymentresource/schema.go | 211 - .../deploymentresource/schema_apm.go | 141 - .../schema_elasticsearch.go | 549 -- .../schema_enteprise_search.go | 148 - .../schema_integrations_server.go | 140 - .../deploymentresource/schema_kibana.go | 130 - ec/ecresource/deploymentresource/schema_v0.go | 704 --- ...-empty-config-create-expected-payload.json | 214 + ...-v2-empty-config-expected-deployment1.json | 767 +++ ...-v2-empty-config-expected-deployment2.json | 1313 +++++ ...-v2-empty-config-expected-deployment3.json | 669 +++ .../testdata/aws-io-optimized-v2.json | 363 ++ .../{ => testutil}/testutil_func.go | 23 +- .../{ => testutil}/testutil_func_test.go | 4 +- .../deploymentresource/testutil_datastruct.go | 263 - .../topology/v1/topology.go | 38 + .../deploymentresource/traffic_filter.go | 69 - ec/ecresource/deploymentresource/update.go | 156 +- 
.../deploymentresource/update_test.go | 96 - .../update_traffic_rules.go | 115 - .../update_traffic_rules_test.go | 96 - .../deploymentresource/utils/definitions.go | 23 + .../utils/enrich_elasticsearch_template.go | 59 + .../deploymentresource/utils/get_first.go} | 33 +- .../deploymentresource/utils/getters.go | 175 + .../deploymentresource/utils/getters_test.go | 206 + .../utils/missing_field_error.go | 24 + .../utils/node_types_to_node_roles.go | 94 + .../{ => utils}/stopped_resource.go | 22 +- .../{ => utils}/stopped_resource_test.go | 18 +- ec/ecresource/extensionresource/create.go | 17 + ec/ecresource/extensionresource/delete.go | 17 + ec/ecresource/extensionresource/read.go | 17 + ec/ecresource/extensionresource/update.go | 17 + ec/internal/converters/convert_tags.go | 96 + .../convert_tags_test.go} | 4 +- ec/internal/converters/extract_endpoint.go | 64 + ec/internal/converters/parse_topology_size.go | 51 + ec/internal/util/parsers.go | 30 +- ec/internal/validators/length.go | 69 + ec/internal/validators/notempty.go | 58 + ec/internal/validators/oneOf.go | 64 + ec/provider.go | 16 +- ec/provider_config.go | 15 - ec/version.go | 2 +- examples/deployment/deployment.tf | 24 +- examples/deployment_ccs/deployment.tf | 52 +- .../elastic_deployment.tf | 10 +- examples/deployment_ec2_instance/provider.tf | 2 +- examples/deployment_with_init/deployment.tf | 15 +- examples/deployment_with_init/provider.tf | 2 +- examples/extension_bundle/extension.tf | 2 +- go.mod | 34 +- go.sum | 106 +- main.go | 43 +- 282 files changed, 24962 insertions(+), 17761 deletions(-) delete mode 100644 .changelog/507.txt create mode 100644 .changelog/547.txt create mode 100644 docs/data-sources/ec_aws_privatelink_endpoint.md create mode 100644 docs/data-sources/ec_azure_privatelink_endpoint.md create mode 100644 docs/data-sources/ec_gcp_private_service_connect_endpoint.md rename ec/acc/{deployment_elasticsearch_kesytore_test.go => deployment_elasticsearch_keystore_test.go} (98%) create mode 
100644 ec/acc/testdata/deployment_basic_settings_config_import.tf create mode 100644 ec/acc/testdata/deployment_elasticsearch_keystore_1_041.tf create mode 100644 ec/acc/testdata/deployment_elasticsearch_keystore_1_migrated.tf create mode 100644 ec/acc/testdata/deployment_traffic_filter_association_basic_041.tf create mode 100644 ec/ecdatasource/privatelinkdatasource/aws_datasource.go create mode 100644 ec/ecdatasource/privatelinkdatasource/aws_datasource_test.go create mode 100644 ec/ecdatasource/privatelinkdatasource/azure_datasource.go create mode 100644 ec/ecdatasource/privatelinkdatasource/azure_datasource_test.go create mode 100644 ec/ecdatasource/privatelinkdatasource/datasource.go create mode 100644 ec/ecdatasource/privatelinkdatasource/gcp_datasource.go create mode 100644 ec/ecdatasource/privatelinkdatasource/gcp_datasource_test.go create mode 100644 ec/ecdatasource/privatelinkdatasource/regionPrivateLinkMap.json create mode 100644 ec/ecresource/deploymentresource/apm/v1/apm.go create mode 100644 ec/ecresource/deploymentresource/apm/v1/apm_config.go create mode 100644 ec/ecresource/deploymentresource/apm/v1/schema.go create mode 100644 ec/ecresource/deploymentresource/apm/v2/apm.go create mode 100644 ec/ecresource/deploymentresource/apm/v2/apm_config.go create mode 100644 ec/ecresource/deploymentresource/apm/v2/apm_payload_test.go rename ec/ecresource/deploymentresource/{apm_flatteners_test.go => apm/v2/apm_read_test.go} (69%) create mode 100644 ec/ecresource/deploymentresource/apm/v2/apm_topology.go create mode 100644 ec/ecresource/deploymentresource/apm/v2/schema.go delete mode 100644 ec/ecresource/deploymentresource/apm_expanders.go delete mode 100644 ec/ecresource/deploymentresource/apm_expanders_test.go delete mode 100644 ec/ecresource/deploymentresource/apm_flatteners.go delete mode 100644 ec/ecresource/deploymentresource/delete_test.go create mode 100644 ec/ecresource/deploymentresource/deployment/v1/deployment.go create mode 100644 
ec/ecresource/deploymentresource/deployment/v1/schema.go create mode 100644 ec/ecresource/deploymentresource/deployment/v2/deployment.go create mode 100644 ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go create mode 100644 ec/ecresource/deploymentresource/deployment/v2/deployment_parse_credentials_test.go create mode 100644 ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go create mode 100644 ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go create mode 100644 ec/ecresource/deploymentresource/deployment/v2/elasticsearch_remote_cluster_payload_test.go create mode 100644 ec/ecresource/deploymentresource/deployment/v2/partial_stapshot_strategy_test.go create mode 100644 ec/ecresource/deploymentresource/deployment/v2/schema.go rename ec/ecresource/deploymentresource/{ => deployment/v2}/traffic_filter_test.go (77%) create mode 100644 ec/ecresource/deploymentresource/deployment_not_found_test.go create mode 100644 ec/ecresource/deploymentresource/deployment_test.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_config.go rename ec/{internal/flatteners/flatten_endpoint.go => ecresource/deploymentresource/elasticsearch/v1/elasticsearch_extension.go} (54%) create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_remote_cluster.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_snapshot_source.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_strategy.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology_autoscaling.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology_config.go create mode 100644 
ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_trust_account.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_trust_external.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v1/schema.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_config.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_extension.go rename ec/ecresource/deploymentresource/{elasticsearch_expanders_test.go => elasticsearch/v2/elasticsearch_payload_test.go} (73%) rename ec/ecresource/deploymentresource/{elasticsearch_flatteners_test.go => elasticsearch/v2/elasticsearch_read_test.go} (63%) create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_remote_cluster.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_snapshot_source.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_account.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_external.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/schema.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go delete mode 100644 ec/ecresource/deploymentresource/elasticsearch_expanders.go delete mode 100644 ec/ecresource/deploymentresource/elasticsearch_flatteners.go delete mode 100644 ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders.go delete mode 100644 
ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders_test.go delete mode 100644 ec/ecresource/deploymentresource/enterprise_search_expanders.go delete mode 100644 ec/ecresource/deploymentresource/enterprise_search_expanders_test.go delete mode 100644 ec/ecresource/deploymentresource/enterprise_search_flatteners.go create mode 100644 ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search.go create mode 100644 ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search_config.go create mode 100644 ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search_topology.go create mode 100644 ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go create mode 100644 ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search.go create mode 100644 ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_config.go create mode 100644 ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload_test.go rename ec/ecresource/deploymentresource/{enterprise_search_flatteners_test.go => enterprisesearch/v2/enterprise_search_read_test.go} (79%) create mode 100644 ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go create mode 100644 ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go delete mode 100644 ec/ecresource/deploymentresource/expanders.go delete mode 100644 ec/ecresource/deploymentresource/expanders_test.go delete mode 100644 ec/ecresource/deploymentresource/flatteners.go delete mode 100644 ec/ecresource/deploymentresource/flatteners_test.go delete mode 100644 ec/ecresource/deploymentresource/import.go delete mode 100644 ec/ecresource/deploymentresource/import_test.go delete mode 100644 ec/ecresource/deploymentresource/integrations_server_expanders.go delete mode 100644 ec/ecresource/deploymentresource/integrations_server_expanders_test.go delete mode 100644 ec/ecresource/deploymentresource/integrations_server_flatteners.go 
create mode 100644 ec/ecresource/deploymentresource/integrationsserver/v1/integrations_server.go create mode 100644 ec/ecresource/deploymentresource/integrationsserver/v1/integrations_server_config.go create mode 100644 ec/ecresource/deploymentresource/integrationsserver/v1/schema.go create mode 100644 ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server.go create mode 100644 ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_config.go create mode 100644 ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload_test.go rename ec/ecresource/deploymentresource/{integrations_server_flatteners_test.go => integrationsserver/v2/integrations_server_read_test.go} (71%) create mode 100644 ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_topology.go create mode 100644 ec/ecresource/deploymentresource/integrationsserver/v2/schema.go create mode 100644 ec/ecresource/deploymentresource/kibana/v1/kibana.go create mode 100644 ec/ecresource/deploymentresource/kibana/v1/kibana_config.go create mode 100644 ec/ecresource/deploymentresource/kibana/v1/schema.go create mode 100644 ec/ecresource/deploymentresource/kibana/v2/kibana.go create mode 100644 ec/ecresource/deploymentresource/kibana/v2/kibana_config.go create mode 100644 ec/ecresource/deploymentresource/kibana/v2/kibana_payload_test.go rename ec/ecresource/deploymentresource/{kibana_flatteners_test.go => kibana/v2/kibana_read_test.go} (57%) create mode 100644 ec/ecresource/deploymentresource/kibana/v2/kibana_topology.go create mode 100644 ec/ecresource/deploymentresource/kibana/v2/schema.go delete mode 100644 ec/ecresource/deploymentresource/kibana_expanders.go delete mode 100644 ec/ecresource/deploymentresource/kibana_expanders_test.go delete mode 100644 ec/ecresource/deploymentresource/kibana_flatteners.go delete mode 100644 ec/ecresource/deploymentresource/observability.go create mode 100644 
ec/ecresource/deploymentresource/observability/v1/observability.go create mode 100644 ec/ecresource/deploymentresource/observability/v1/schema.go create mode 100644 ec/ecresource/deploymentresource/observability/v2/observability.go rename ec/ecresource/deploymentresource/{observability_test.go => observability/v2/observability_payload_test.go} (61%) create mode 100644 ec/ecresource/deploymentresource/observability/v2/observability_read_test.go create mode 100644 ec/ecresource/deploymentresource/observability/v2/schema.go delete mode 100644 ec/ecresource/deploymentresource/read_test.go delete mode 100644 ec/ecresource/deploymentresource/schema.go delete mode 100644 ec/ecresource/deploymentresource/schema_apm.go delete mode 100644 ec/ecresource/deploymentresource/schema_elasticsearch.go delete mode 100644 ec/ecresource/deploymentresource/schema_enteprise_search.go delete mode 100644 ec/ecresource/deploymentresource/schema_integrations_server.go delete mode 100644 ec/ecresource/deploymentresource/schema_kibana.go delete mode 100644 ec/ecresource/deploymentresource/schema_v0.go create mode 100644 ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-create-expected-payload.json create mode 100644 ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment1.json create mode 100644 ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment2.json create mode 100644 ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json create mode 100644 ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2.json rename ec/ecresource/deploymentresource/{ => testutil}/testutil_func.go (78%) rename ec/ecresource/deploymentresource/{ => testutil}/testutil_func_test.go (96%) delete mode 100644 ec/ecresource/deploymentresource/testutil_datastruct.go create mode 100644 ec/ecresource/deploymentresource/topology/v1/topology.go delete mode 100644 
ec/ecresource/deploymentresource/traffic_filter.go delete mode 100644 ec/ecresource/deploymentresource/update_test.go delete mode 100644 ec/ecresource/deploymentresource/update_traffic_rules.go delete mode 100644 ec/ecresource/deploymentresource/update_traffic_rules_test.go create mode 100644 ec/ecresource/deploymentresource/utils/definitions.go create mode 100644 ec/ecresource/deploymentresource/utils/enrich_elasticsearch_template.go rename ec/{internal/flatteners/flatten_tags.go => ecresource/deploymentresource/utils/get_first.go} (63%) create mode 100644 ec/ecresource/deploymentresource/utils/getters.go create mode 100644 ec/ecresource/deploymentresource/utils/getters_test.go create mode 100644 ec/ecresource/deploymentresource/utils/missing_field_error.go create mode 100644 ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go rename ec/ecresource/deploymentresource/{ => utils}/stopped_resource.go (68%) rename ec/ecresource/deploymentresource/{ => utils}/stopped_resource_test.go (89%) create mode 100644 ec/internal/converters/convert_tags.go rename ec/internal/{flatteners/flatten_tags_test.go => converters/convert_tags_test.go} (96%) create mode 100644 ec/internal/converters/extract_endpoint.go create mode 100644 ec/internal/converters/parse_topology_size.go create mode 100644 ec/internal/validators/length.go create mode 100644 ec/internal/validators/notempty.go create mode 100644 ec/internal/validators/oneOf.go diff --git a/.changelog/507.txt b/.changelog/507.txt deleted file mode 100644 index 789bd8643..000000000 --- a/.changelog/507.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -resource/elasticsearch: Adds support for the `strategy` property to the `elasticsearch` resource. This allows users to define how different plan changes are coordinated. 
-``` diff --git a/.changelog/547.txt b/.changelog/547.txt new file mode 100644 index 000000000..4a383e3f1 --- /dev/null +++ b/.changelog/547.txt @@ -0,0 +1,3 @@ +```release-note:feature +resource/deployment: Utilise the template migration API to build the base update request when changing `deployment_template_id`. This results in more reliable changes between deployment templates. +``` diff --git a/.github/workflows/branch.yml b/.github/workflows/branch.yml index b9ed3c893..0ad07d252 100644 --- a/.github/workflows/branch.yml +++ b/.github/workflows/branch.yml @@ -35,7 +35,7 @@ jobs: echo ::set-output name=BRANCH::$(echo ${BRANCH} | cut -d '.' -f1-2 | tr -d 'v') - name: Create a github branch - uses: peterjgrainger/action-create-branch@v2.2.0 + uses: peterjgrainger/action-create-branch@v2.3.0 env: GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} with: diff --git a/CHANGELOG.md b/CHANGELOG.md index ce79dbb69..ef0490875 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,23 @@ -# 0.5.0 (Unreleased) +# 0.6.0 (Unreleased) FEATURES: +* resource/deployment: Utilise the template migration API to build the base update request when changing `deployment_template_id`. This results in more reliable changes between deployment templates. ([#547](https://github.com/elastic/terraform-provider-ec/issues/547)) + +# 0.5.0 (Oct 12, 2022) + +FEATURES: + +* datasource/privatelink: Adds data sources to obtain AWS/Azure Private Link, and GCP Private Service Connect configuration data. ([#533](https://github.com/elastic/terraform-provider-ec/issues/533)) +* resource/deployment: Adds fleet_https_endpoint and apm_https_endpoint to integrations server resources. This allows consumers to explicitly capture service urls for dependent modules. ([#548](https://github.com/elastic/terraform-provider-ec/issues/548)) * resource/elasticsearch: Adds support for the `strategy` property to the `elasticsearch` resource. This allows users to define how different plan changes are coordinated. 
([#507](https://github.com/elastic/terraform-provider-ec/issues/507)) +BUG FIXES: + +* resource/deployment: Correctly restrict stateless (Kibana/Enterprise Search/Integrations Server) resources to a single topology element. Fixes a provider crash when multiple elements without an instance_configuration_id were specified. ([#536](https://github.com/elastic/terraform-provider-ec/issues/536)) +* resource/elasticsearchkeystore: Correctly delete keystore items when removed from the module definition. ([#546](https://github.com/elastic/terraform-provider-ec/issues/546)) +* resource: Updates all nested field accesses to validate type casts. This prevents a provider crash when a field is explicitly set to `null`. ([#534](https://github.com/elastic/terraform-provider-ec/issues/534)) + # 0.4.1 (May 11, 2022) BREAKING CHANGES: diff --git a/Makefile b/Makefile index b1b384e71..03d23d726 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ SHELL := /bin/bash export GO111MODULE ?= on -export VERSION := 0.5.0-dev +export VERSION := 0.6.0-dev export BINARY := terraform-provider-ec export GOBIN = $(shell pwd)/bin diff --git a/NOTICE b/NOTICE index a7271a1b3..2843fc736 100755 --- a/NOTICE +++ b/NOTICE @@ -8,7 +8,6 @@ third-party software developed by the licenses listed below. 
github.com/davecgh/go-spew 0BSD github.com/agext/levenshtein Apache-2.0 -github.com/apparentlymart/go-textseg/v13 Apache-2.0 github.com/elastic/cloud-sdk-go Apache-2.0 github.com/go-openapi/analysis Apache-2.0 github.com/go-openapi/errors Apache-2.0 @@ -60,22 +59,23 @@ github.com/mitchellh/reflectwalk MIT github.com/stretchr/testify MIT github.com/zclconf/go-cty MIT github.com/hashicorp/errwrap MPL-2.0 -github.com/hashicorp/go-checkpoint MPL-2.0 -github.com/hashicorp/go-cleanhttp MPL-2.0 github.com/hashicorp/go-multierror MPL-2.0 -github.com/hashicorp/go-plugin MPL-2.0 github.com/hashicorp/go-uuid MPL-2.0 -github.com/hashicorp/go-version MPL-2.0 -github.com/hashicorp/hc-install MPL-2.0 github.com/hashicorp/hcl/v2 MPL-2.0 +github.com/hashicorp/logutils MPL-2.0 +github.com/hashicorp/terraform-exec MPL-2.0 github.com/hashicorp/terraform-json MPL-2.0 github.com/hashicorp/terraform-plugin-go MPL-2.0 -github.com/hashicorp/terraform-plugin-log MPL-2.0 -github.com/hashicorp/terraform-plugin-sdk/v2 MPL-2.0 github.com/hashicorp/terraform-registry-address MPL-2.0 -github.com/hashicorp/logutils MPL-2.0-no-copyleft-exception -github.com/hashicorp/terraform-exec MPL-2.0-no-copyleft-exception +github.com/hashicorp/yamux MPL-2.0 +github.com/hashicorp/go-checkpoint MPL-2.0-no-copyleft-exception +github.com/hashicorp/go-cleanhttp MPL-2.0-no-copyleft-exception +github.com/hashicorp/go-plugin MPL-2.0-no-copyleft-exception +github.com/hashicorp/go-version MPL-2.0-no-copyleft-exception +github.com/hashicorp/hc-install MPL-2.0-no-copyleft-exception +github.com/hashicorp/terraform-plugin-log MPL-2.0-no-copyleft-exception +github.com/hashicorp/terraform-plugin-sdk/v2 MPL-2.0-no-copyleft-exception github.com/hashicorp/terraform-svchost MPL-2.0-no-copyleft-exception -github.com/hashicorp/yamux MPL-2.0-no-copyleft-exception +github.com/apparentlymart/go-textseg/v13 Unicode-TOU ========================================================================= diff --git a/README.md b/README.md index 
1a3c1bb6b..d160ac2cc 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ terraform { required_providers { ec = { source = "elastic/ec" - version = "0.5.0" + version = "0.6.0" } } } @@ -72,9 +72,15 @@ resource "ec_deployment" "example_minimal" { deployment_template_id = "aws-io-optimized-v2" # Use the deployment template defaults - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = { + topology = {} + } } ``` @@ -114,3 +120,119 @@ $ export EC_API_KEY="" ``` After doing so, you can navigate to any of our examples in `./examples` and try one. + +### Moving to TF Framework and schema change for `ec_deployment` resource. + +v6.0.0 contains migration to [the TF Plugin Framework](https://developer.hashicorp.com/terraform/plugin/framework) and intoducing version 2 for `ec_deployment` resource: + +- switching to attributes syntax instead of blocks for almost all definitions that used to be blocks. It means that, for example, a definition like `config {}` has to be changed to `config = {}`, e.g. + +```hcl +resource "ec_deployment" "defaults" { + name = "example" + region = "us-east-1" + version = data.ec_stack.latest.version + deployment_template_id = "aws-io-optimized-v2" + + elasticsearch = { + hot = { + autoscaling = {} + } + } + + kibana = { + topology = {} + } + + enterprise_search = { + zone_count = 1 + } +} +``` + +- `topology` attribute of `elasticsearch` is replaced with a number of dedicated attributes, one per tier, e.g. 
+ +``` + elasticsearch { + topology { + id = "hot_content" + size = "1g" + autoscaling { + max_size = "8g" + } + } + topology { + id = "warm" + size = "2g" + autoscaling { + max_size = "15g" + } + } + } +``` + +has to be converted to + +``` + elasticsearch = { + hot = { + size = "1g" + autoscaling = { + max_size = "8g" + } + } + + warm = { + size = "2g" + autoscaling = { + max_size = "15g" + } + } + } + +``` + +- due to some existing limitations of TF, nested attributes that are nested inside other nested attributes cannot be `Computed`. It means that all such attributes have to be mentioned in configurations even if they are empty. E.g., a definition of `elasticsearch` has to include all topology elements (tiers) that have non-zero size or can be scaled up (if autoscaling is enabled) in the corresponding template. For example, the simplest definition of `elasticsearch` for the `aws-io-optimized-v2` template is + +```hcl +resource "ec_deployment" "defaults" { + name = "example" + region = "us-east-1" + version = data.ec_stack.latest.version + deployment_template_id = "aws-io-optimized-v2" + + elasticsearch = { + hot = { + autoscaling = {} + } + } +} +``` + +Please note that the configuration explicitly mentions the `hot` tier and the tier has `autoscaling` and `config` attributes even despite the fact that they are empty. If they were omitted, TF (at least up to version 1.3.3) could complain `Error: Provider produced inconsistent result after apply`. + +- a lot of attributes that used to be collections (e.g. lists and sets) are converted to singletons, e.g. `elasticsearch`, `apm`, `kibana`, `enterprise_search`, `observability`, `topology`, `autoscaling`, etc. Please note that, generally, users are not expected to make any change to their existing configuration to address this particular change (besides moving from block to attribute syntax).
All these components used to exist in single instances, so the change is mostly syntactical, taking into account the switch to attributes instead of blocks (otherwise if we kept lists for configs, `config {}` would have to be rewritten as `config = [{}]` with the move to the attribute syntax). However, this change is a breaking one from the schema perspective and requires a state upgrade for existing resources that is performed by TF (by calling the provider's API). + +- [`strategy` attribute](https://registry.terraform.io/providers/elastic/ec/latest/docs/resources/ec_deployment#strategy) is converted to string with the same set of values that was used for its `type` attribute previously; + +- switching to TF protocol 6. From the user perspective, it should not require any change to existing configurations. + +#### Migration guide. + +The schema modifications mean that a current TF state cannot work as is with the provider version 0.6.0 and higher. + +There are 2 ways to tackle this: + +- import existing resource using deployment ID, e.g. `terraform import 'ec_deployment.test' ` +- state upgrade that is performed by TF by calling the provider's API so no action is required from the user perspective + +Currently the state upgrade functionality is still in development so importing existing resources is the recommended way to deal with existing TF states. + +#### Known issues. + +For the migrated version (0.6.0 or higher), `terraform plan` output can contain more changes compared to the older versions of the provider (that use TF SDK). +This happens because TF Framework treats all `computed` attributes as `unknown` (known after apply) once configuration changes. +`ec_deployment` schema contains quite a few such attributes, so `terraform plan`'s output can be quite big for the resource due to the mentioned reason. +However, it doesn't mean that all attributes that are marked as `unknown` in the plan will get new values after apply.
+To mitigate the problem, the provider uses plan modifiers, which are a recommended way to reduce plan output. +However, currently plan modifiers don't cover all the `computed` attributes. diff --git a/build/Makefile.deps b/build/Makefile.deps index ac6edfdbc..728cee016 100644 --- a/build/Makefile.deps +++ b/build/Makefile.deps @@ -5,7 +5,7 @@ ARCH_GORELEASER:=$(shell $(PWD)/scripts/uname_arch_goreleaser.sh) VERSION_DIR:=$(GOBIN)/versions VERSION_GOLICENSER:=v0.3.0 -VERSION_GOLANGCILINT:=v1.49.0 +VERSION_GOLANGCILINT:=v1.50.0 VERSION_GORELEASER:=v1.2.5 VERSION_GOCHANGELOG:=v0.0.0-20201005170154-56335215ce3a VERSION_VERSIONBUMP:=v1.1.0 diff --git a/build/Makefile.test b/build/Makefile.test index e1229d14f..90a4a1763 100644 --- a/build/Makefile.test +++ b/build/Makefile.test @@ -3,7 +3,7 @@ SWEEP_DIR ?= $(TEST_ACC) SWEEP_CI_RUN_FILTER ?= ec_deployments TEST ?= ./... TEST_COUNT ?= 1 -TESTUNITARGS ?= -timeout 30s -p 4 -race -cover -coverprofile=reports/c.out +TESTUNITARGS ?= -timeout 5m -race -cover -coverprofile=reports/c.out TEST_ACC ?= github.com/elastic/terraform-provider-ec/ec/acc TEST_NAME ?= TestAcc TEST_ACC_PARALLEL = 6 @@ -26,7 +26,7 @@ unit: _report_path tests: unit .PHONY: testacc -## Runs the Terraform acceptance tests. Use TEST_NAME, TESTARGS, TEST_COUNT and TEST_ACC_PARALLEL to control execution. +## Runs the Terraform acceptance tests. Use TEST_NAME, TESTARGS, TEST_COUNT to control execution. testacc: @ echo "-> Running terraform acceptance tests..." @ TF_ACC=1 go test $(TEST_ACC) -v -count $(TEST_COUNT) -parallel $(TEST_ACC_PARALLEL) $(TESTARGS) -timeout 120m -run $(TEST_NAME) diff --git a/build/Makefile.tools b/build/Makefile.tools index ceeeabf1e..466c40c05 100644 --- a/build/Makefile.tools +++ b/build/Makefile.tools @@ -1,6 +1,6 @@ VERSION_TFPROVIDERDOCS:=v0.6.0 -VERSION_TERRAFMT:=12db38d +VERSION_TERRAFMT:=v0.5.2 VERSION_TFPROVIDERLINT:=v0.28.1 ## Installs the required static checks for terraform.
diff --git a/docs/data-sources/ec_aws_privatelink_endpoint.md b/docs/data-sources/ec_aws_privatelink_endpoint.md new file mode 100644 index 000000000..1eda34af9 --- /dev/null +++ b/docs/data-sources/ec_aws_privatelink_endpoint.md @@ -0,0 +1,29 @@ +--- +page_title: "Elastic Cloud: ec_aws_privatelink_endpoint" +description: |- + Retrieves infomation about the AWS Private Link configuration for a given region. +--- + +# Data Source: ec_aws_privatelink_endpoint + +Use this data source to retrieve information about the AWS Private Link configuration for a given region. Further documentation on how to establish a PrivateLink connection can be found in the ESS [documentation](https://www.elastic.co/guide/en/cloud/current/ec-traffic-filtering-vpc.html). + +~> **NOTE:** This data source provides data relevant to the Elasticsearch Service (ESS) only, and should not be used for ECE. + +## Example Usage + +```hcl +data "ec_aws_privatelink_endpoint" "us-east-1" { + region = "us-east-1" +} +``` + +## Argument Reference + +* `region` (Required) - Region to retrieve the Private Link configuration for. + +## Attributes Reference + +* `vpc_service_name` - The VPC service name used to connect to the region. +* `domain_name` - The domain name to used in when configuring a private hosted zone in the VPCE connection. +* `zone_ids` - The IDs of the availability zones hosting the VPC endpoints. diff --git a/docs/data-sources/ec_azure_privatelink_endpoint.md b/docs/data-sources/ec_azure_privatelink_endpoint.md new file mode 100644 index 000000000..5d9dd8955 --- /dev/null +++ b/docs/data-sources/ec_azure_privatelink_endpoint.md @@ -0,0 +1,28 @@ +--- +page_title: "Elastic Cloud: ec_azure_privatelink_endpoint" +description: |- + Retrieves infomation about the Azure Private Link configuration for a given region. +--- + +# Data Source: ec_azure_privatelink_endpoint + +Use this data source to retrieve information about the Azure Private Link configuration for a given region. 
Further documentation on how to establish a PrivateLink connection can be found in the ESS [documentation](https://www.elastic.co/guide/en/cloud/current/ec-traffic-filtering-vnet.html). + +~> **NOTE:** This data source provides data relevant to the Elasticsearch Service (ESS) only, and should not be used for ECE. + +## Example Usage + +```hcl +data "ec_azure_privatelink_endpoint" "eastus" { + region = "eastus" +} +``` + +## Argument Reference + +* `region` (Required) - Region to retrieve the Private Link configuration for. + +## Attributes Reference + +* `service_alias` - The service alias to establish a connection to. +* `domain_name` - The domain name to use when configuring a private hosted zone in the VNet connection. diff --git a/docs/data-sources/ec_gcp_private_service_connect_endpoint.md b/docs/data-sources/ec_gcp_private_service_connect_endpoint.md new file mode 100644 index 000000000..9f8a24e0f --- /dev/null +++ b/docs/data-sources/ec_gcp_private_service_connect_endpoint.md @@ -0,0 +1,28 @@ +--- +page_title: "Elastic Cloud: ec_gcp_private_service_connect_endpoint" +description: |- + Retrieves information about the GCP Private Service Connect configuration for a given region. +--- + +# Data Source: ec_gcp_private_service_connect_endpoint + +Use this data source to retrieve information about the GCP Private Service Connect configuration for a given region. Further documentation on how to establish a Private Service Connect connection can be found in the ESS [documentation](https://www.elastic.co/guide/en/cloud/current/ec-traffic-filtering-psc.html). + +~> **NOTE:** This data source provides data relevant to the Elasticsearch Service (ESS) only, and should not be used for ECE. + +## Example Usage + +```hcl +data "ec_gcp_private_service_connect_endpoint" "us-central1" { + region = "us-central1" +} +``` + +## Argument Reference + +* `region` (Required) - Region to retrieve the Private Link configuration for.
+ +## Attributes Reference + +* `service_attachment_uri` - The service attachment URI to attach the PSC endpoint to. +* `domain_name` - The domain name to point towards the PSC endpoint. diff --git a/docs/guides/configuring-sso-ec-deployment.md b/docs/guides/configuring-sso-ec-deployment.md index ea046a065..5b7e69d17 100644 --- a/docs/guides/configuring-sso-ec-deployment.md +++ b/docs/guides/configuring-sso-ec-deployment.md @@ -7,7 +7,7 @@ description: |- # Configuring a SAML provider for an Elastic Cloud Deployment -A common use case for the Elastic Cloud Terraform provider is to spin up an Elastic Cloud deployment preconfigured with an SSO Identity provider (SAML2.0 or OIDC based) +A common use case for the Elastic Cloud Terraform provider is to spin up an Elastic Cloud deployment preconfigured with an SSO Identity provider (SAML2.0 or OIDC based) Relying on the URL generated by Elastic Cloud creates a cyclic dependency, where the Elasticsearch cluster configuration requires the Kibana URL, which only exists after the deployment is created. You can avoid the cyclic dependency by configuring the deployment with an alias, which allows you to know the final Kibana URL before the deployment is created. @@ -31,27 +31,27 @@ resource "ec_deployment" "elastic-sso" { version = "7.17.5" deployment_template_id = "aws-compute-optimized-v3" - elasticsearch { - topology { - id = "hot_content" + elasticsearch = { + hot = { size = "8g" zone_count = 2 } - topology { - id = "warm" + warm = { size = "8g" zone_count = 2 } - config { + config = { # The URL domain suffix that is used in this example is often different for other Elasticsearch Service regions. Please check the appropriate domain suffix for your used region. 
user_settings_yaml = templatefile("./es.yml", { kibana_url = format("https://%s-%s.kb.us-east-1.aws.found.io:9243", var.name, substr("${random_uuid.uuid.result}", 0, 6)) }) } } - kibana { - config { + kibana = { + topology = {} + + config = { user_settings_yaml = file("./kb.yml") } } @@ -62,7 +62,7 @@ resource "ec_deployment" "elastic-sso" { Let's take a closer look at one specific argument here: ```hcl -format("%s-%s",var.name,substr("${random_uuid.uuid.result}",0,6)) +name = format("%s-%s", var.name, substr("${random_uuid.uuid.result}", 0, 6)) ``` This will tell terraform to create a string, that looks like `-<6-digits-of-uuid>` @@ -71,9 +71,9 @@ You will configure the deployment alias field to be the same, so if the deployme Then, by using a variable in the `es.yml` file and a terraform templating mechanism, you can generate your own `es.yml` file. Your variable is named kibana_url, as seen in the ec_deployment resource: ```hcl - config{ - user_settings_yaml = templatefile("./es.yml",{kibana_url=format("https://%s-%s.kb.us-east-1.aws.found.io:9243",var.name,substr("${random_uuid.uuid.result}",0,6))}) - } +config = { + user_settings_yaml = templatefile("./es.yml", { kibana_url = format("https://%s-%s.kb.us-east-1.aws.found.io:9243", var.name, substr("${random_uuid.uuid.result}", 0, 6)) }) +} ``` This specific template uses your name and UUID to determine the URL for the Elasticsearch deployment before it is even created, and stores it in the `es.yml` file. 
diff --git a/docs/resources/ec_deployment.md b/docs/resources/ec_deployment.md index 305cad354..ef2a9e377 100644 --- a/docs/resources/ec_deployment.md +++ b/docs/resources/ec_deployment.md @@ -35,13 +35,17 @@ resource "ec_deployment" "example_minimal" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} - integrations_server {} + integrations_server = {} - enterprise_search {} + enterprise_search = {} } ``` @@ -58,56 +62,54 @@ resource "ec_deployment" "example_minimal" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch { + elasticsearch = { autoscale = "true" # If `autoscale` is set, all topology elements that # - either set `size` in the plan or # - have non-zero default `max_size` (that is read from the deployment templates's `autoscaling_max` value) - # have to be listed in alphabetical order of their `id` fields, - # even if their blocks don't specify other fields beside `id` - topology { - id = "cold" + # have to be listed even if their blocks don't specify other fields beside `id` + + cold = { + autoscaling = {} } - topology { - id = "frozen" + frozen = { + autoscaling = {} } - topology { - id = "hot_content" + hot = { size = "8g" - autoscaling { + autoscaling = { max_size = "128g" max_size_resource = "memory" } } - topology { - id = "ml" + ml = { + autoscaling = {} } - topology { - id = "warm" + warm = { + autoscaling = {} } - } # Initial size for `hot_content` tier is set to 8g # so `hot_content`'s size has to be added to the `ignore_changes` meta-argument to ignore future modifications that can be made by the autoscaler lifecycle { ignore_changes = [ - elasticsearch[0].topology[2].size + elasticsearch.hot.size ] } - kibana {} + kibana = {} - integrations_server {} + integrations_server = {} - enterprise_search {} + enterprise_search = {} } ``` @@ -128,12 +130,16 @@ 
resource "ec_deployment" "example_observability" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} # Optional observability settings - observability { + observability = { deployment_id = ec_deployment.example_minimal.id } } @@ -141,7 +147,7 @@ resource "ec_deployment" "example_observability" { It is possible to enable observability without using a second deployment, by storing the observability data in the current deployment. To enable this, set `deployment_id` to `self`. ```hcl -observability { +observability = { deployment_id = "self" } ``` @@ -161,10 +167,10 @@ resource "ec_deployment" "source_deployment" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + hot = { + size = "1g" + autoscaling = {} } } } @@ -176,15 +182,18 @@ resource "ec_deployment" "ccs" { version = data.ec_stack.latest.version deployment_template_id = "aws-cross-cluster-search-v2" - elasticsearch { - remote_cluster { + elasticsearch = { + hot = { + autoscaling = {} + } + remote_cluster = [{ deployment_id = ec_deployment.source_deployment.id alias = ec_deployment.source_deployment.name ref_id = ec_deployment.source_deployment.elasticsearch.0.ref_id - } + }] } - kibana {} + kibana = {} } ``` @@ -205,7 +214,11 @@ resource "ec_deployment" "with_tags" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } tags = { owner = "elastic cloud" @@ -231,10 +244,13 @@ resource "ec_deployment" "with_tags" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch { - strategy { - type = "rolling_all" + elasticsearch = { + hot = { + autoscaling = {} } + strategy = [{ + type = "rolling_all" + }] }
tags = { @@ -396,7 +412,7 @@ The optional `elasticsearch.strategy` allows you to choose the configuration str * `grow_and_shrink` Add all nodes with the new changes before to stop any node. * `rolling_grow_and_shrink` Add nodes one by one replacing the existing ones when the new node is ready. * `rolling_all` Stop all nodes, perform the changes and start all nodes. - + #### Kibana The optional `kibana` block supports the following arguments: @@ -405,11 +421,6 @@ The optional `kibana` block supports the following arguments: * `elasticsearch_cluster_ref_id` - (Optional) This field references the `ref_id` of the deployment Elasticsearch cluster. The default value `main-elasticsearch` is recommended. * `ref_id` - (Optional) Can be set on the Kibana resource. The default value `main-kibana` is recommended. * `config` (Optional) Kibana settings applied to all topologies unless overridden in the `topology` element. - -##### Topology - -The optional `kibana.topology` block supports the following arguments: - * `instance_configuration_id` - (Optional) Default instance configuration of the deployment template. No need to change this value since Kibana has only one _instance type_. * `size` - (Optional) Amount of memory (RAM) per topology element in the "g" notation. When omitted, it defaults to the deployment template value. * `size_resource` - (Optional) Type of resource to which the size is assigned. Defaults to `"memory"`. @@ -432,11 +443,6 @@ The optional `integrations_server` block supports the following arguments: * `elasticsearch_cluster_ref_id` - (Optional) This field references the `ref_id` of the deployment Elasticsearch cluster. The default value `main-elasticsearch` is recommended. * `ref_id` - (Optional) Can be set on the Integrations Server resource. The default value `main-integrations_server` is recommended. * `config` (Optional) Integrations Server settings applied to all topologies unless overridden in the `topology` element. 
- -##### Topology - -The optional `integrations_server.topology` block supports the following arguments: - * `instance_configuration_id` - (Optional) Default instance configuration of the deployment template. No need to change this value since Integrations Server has only one _instance type_. * `size` - (Optional) Amount of memory (RAM) per topology element in the "g" notation. When omitted, it defaults to the deployment template value. * `size_resource` - (Optional) Type of resource to which the size is assigned. Defaults to `"memory"`. @@ -456,11 +462,6 @@ The optional `apm` block supports the following arguments: * `elasticsearch_cluster_ref_id` - (Optional) This field references the `ref_id` of the deployment Elasticsearch cluster. The default value `main-elasticsearch` is recommended. * `ref_id` - (Optional) Can be set on the APM resource. The default value `main-apm` is recommended. * `config` (Optional) APM settings applied to all topologies unless overridden in the `topology` element. - -##### Topology - -The optional `apm.topology` block supports the following arguments: - * `instance_configuration_id` - (Optional) Default instance configuration of the deployment template. No need to change this value since APM has only one _instance type_. * `size` - (Optional) Amount of memory (RAM) per topology element in the "g" notation. When omitted, it defaults to the deployment template value. * `size_resource` - (Optional) Type of resource to which the size is assigned. Defaults to `"memory"`. @@ -484,11 +485,6 @@ The optional `enterprise_search` block supports the following arguments: * `elasticsearch_cluster_ref_id` - (Optional) This field references the `ref_id` of the deployment Elasticsearch cluster. The default value `main-elasticsearch` is recommended. * `ref_id` - (Optional) Can be set on the Enterprise Search resource. The default value `main-enterprise_search` is recommended. 
* `config` (Optional) Enterprise Search settings applied to all topologies unless overridden in the `topology` element. - -##### Topology - -The optional `enterprise_search.topology` block supports the following settings: - * `instance_configuration_id` - (Optional) Default instance configuration of the deployment template. To change it, use the [full list](https://www.elastic.co/guide/en/cloud/current/ec-regions-templates-instances.html) of regions and deployment templates available in ESS. * `size` - (Optional) Amount of memory (RAM) per `topology` element in the "g" notation. When omitted, it defaults to the deployment template value. * `size_resource` - (Optional) Type of resource to which the size is assigned. Defaults to `"memory"`. @@ -539,6 +535,8 @@ In addition to all the arguments above, the following attributes are exported: * `integrations_server.#.region` - Integrations Server region. * `integrations_server.#.http_endpoint` - Integrations Server resource HTTP endpoint. * `integrations_server.#.https_endpoint` - Integrations Server resource HTTPs endpoint. +* `integrations_server.#.fleet_https_endpoint` - HTTPs endpoint for Fleet Server. +* `integrations_server.#.apm_https_endpoint` - HTTPs endpoint for APM Server. * `apm.#.resource_id` - APM resource unique identifier. * `apm.#.region` - APM region. * `apm.#.http_endpoint` - APM resource HTTP endpoint. 
diff --git a/docs/resources/ec_deployment_elasticsearch_keystore.md b/docs/resources/ec_deployment_elasticsearch_keystore.md index 0091f8ae8..6a5b1677e 100644 --- a/docs/resources/ec_deployment_elasticsearch_keystore.md +++ b/docs/resources/ec_deployment_elasticsearch_keystore.md @@ -63,7 +63,11 @@ resource "ec_deployment" "example_keystore" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } } # Create the keystore secret entry diff --git a/docs/resources/ec_deployment_traffic_filter.md b/docs/resources/ec_deployment_traffic_filter.md index 914b8a10c..565f4876a 100644 --- a/docs/resources/ec_deployment_traffic_filter.md +++ b/docs/resources/ec_deployment_traffic_filter.md @@ -33,9 +33,13 @@ resource "ec_deployment" "example_minimal" { ] # Use the deployment template defaults - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} } resource "ec_deployment_traffic_filter" "example" { @@ -76,9 +80,13 @@ resource "ec_deployment" "example_minimal" { ] # Use the deployment template defaults - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} } resource "ec_deployment_traffic_filter" "azure" { @@ -121,9 +129,13 @@ resource "ec_deployment" "example_minimal" { ] # Use the deployment template defaults - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} } resource "ec_deployment_traffic_filter" "gcp_psc" { @@ -143,7 +155,7 @@ resource "ec_deployment_traffic_filter" "gcp_psc" { The following arguments are supported: * `name` - (Required) Name of the ruleset. -* `type` - (Required) Type of the ruleset. It can be `"ip"`, `"vpce"` or `"azure_private_endpoint"`. +* `type` - (Required) Type of the ruleset. It can be `"ip"`, `"vpce"`, `"azure_private_endpoint"`, or `"gcp_private_service_connect_endpoint"`. 
* `region` - (Required) Filter region, the ruleset can only be attached to deployments in the specific region. * `rule` (Required) Rule block, which can be specified multiple times for multiple rules. * `include_by_default` - (Optional) To automatically include the ruleset in the new deployments. Defaults to `false`. diff --git a/ec/acc/acc_prereq.go b/ec/acc/acc_prereq.go index c0709e41f..91f00f650 100644 --- a/ec/acc/acc_prereq.go +++ b/ec/acc/acc_prereq.go @@ -18,15 +18,12 @@ package acc import ( - "context" "net/http" "os" "testing" "github.com/hashicorp/terraform-plugin-framework/providerserver" "github.com/hashicorp/terraform-plugin-go/tfprotov6" - "github.com/hashicorp/terraform-plugin-mux/tf5to6server" - "github.com/hashicorp/terraform-plugin-mux/tf6muxserver" "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/auth" @@ -42,18 +39,7 @@ var testAccProviderFactory = protoV6ProviderFactories() func protoV6ProviderFactories() map[string]func() (tfprotov6.ProviderServer, error) { return map[string]func() (tfprotov6.ProviderServer, error){ - "ec": func() (tfprotov6.ProviderServer, error) { - return tf6muxserver.NewMuxServer(context.Background(), - func() tfprotov6.ProviderServer { - upgradedSdkProvider, _ := tf5to6server.UpgradeServer( - context.Background(), - ec.LegacyProvider().GRPCProvider, - ) - return upgradedSdkProvider - }, - providerserver.NewProtocol6(ec.New("acc-tests")), - ) - }, + "ec": providerserver.NewProtocol6WithError(ec.New("acc-tests")), } } diff --git a/ec/acc/datasource_deployment_basic_test.go b/ec/acc/datasource_deployment_basic_test.go index a38dc8d75..d0944952f 100644 --- a/ec/acc/datasource_deployment_basic_test.go +++ b/ec/acc/datasource_deployment_basic_test.go @@ -52,55 +52,55 @@ func TestAccDatasourceDeployment_basic(t *testing.T) { resource.TestCheckResourceAttrPair(datasourceName, "traffic_filter.#", resourceName, "traffic_filter.#"), // Elasticsearch - resource.TestCheckResourceAttrPair(datasourceName, 
"elasticsearch.0.ref_id", resourceName, "elasticsearch.0.ref_id"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.cloud_id", resourceName, "elasticsearch.0.cloud_id"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.resource_id", resourceName, "elasticsearch.0.resource_id"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.http_endpoint_id", resourceName, "elasticsearch.0.http_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.https_endpoint_id", resourceName, "elasticsearch.0.https_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.instance_configuration_id", resourceName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.size", resourceName, "elasticsearch.0.topology.0.size"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.size_resource", resourceName, "elasticsearch.0.topology.0.size_resource"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.zone_count", resourceName, "elasticsearch.0.topology.0.zone_count"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.*.node_roles.*", resourceName, "elasticsearch.0.topology.*.node_roles.*"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.ref_id", resourceName, "elasticsearch.ref_id"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.cloud_id", resourceName, "elasticsearch.cloud_id"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.resource_id", resourceName, "elasticsearch.resource_id"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.http_endpoint_id", resourceName, "elasticsearch.http_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.https_endpoint_id", resourceName, 
"elasticsearch.https_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.instance_configuration_id", resourceName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.size", resourceName, "elasticsearch.hot.size"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.size_resource", resourceName, "elasticsearch.hot.size_resource"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.zone_count", resourceName, "elasticsearch.hot.zone_count"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.node_roles.*", resourceName, "elasticsearch.hot.node_roles.*"), // Kibana - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.elasticsearch_cluster_ref_id", resourceName, "kibana.0.elasticsearch_cluster_ref_id"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.ref_id", resourceName, "kibana.0.ref_id"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.cloud_id", resourceName, "kibana.0.cloud_id"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.resource_id", resourceName, "kibana.0.resource_id"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.http_endpoint_id", resourceName, "kibana.0.http_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.https_endpoint_id", resourceName, "kibana.0.https_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.topology.0.instance_configuration_id", resourceName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.topology.0.size", resourceName, "kibana.0.topology.0.size"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.topology.0.size_resource", resourceName, "kibana.0.topology.0.size_resource"), - resource.TestCheckResourceAttrPair(datasourceName, 
"kibana.0.topology.0.zone_count", resourceName, "kibana.0.topology.0.zone_count"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.elasticsearch_cluster_ref_id", resourceName, "kibana.elasticsearch_cluster_ref_id"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.ref_id", resourceName, "kibana.ref_id"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.cloud_id", resourceName, "kibana.cloud_id"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.resource_id", resourceName, "kibana.resource_id"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.http_endpoint_id", resourceName, "kibana.http_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.https_endpoint_id", resourceName, "kibana.https_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.topology.0.instance_configuration_id", resourceName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.topology.0.size", resourceName, "kibana.size"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.topology.0.size_resource", resourceName, "kibana.size_resource"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.topology.0.zone_count", resourceName, "kibana.zone_count"), // APM - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.elasticsearch_cluster_ref_id", resourceName, "apm.0.elasticsearch_cluster_ref_id"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.ref_id", resourceName, "apm.0.ref_id"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.cloud_id", resourceName, "apm.0.cloud_id"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.resource_id", resourceName, "apm.0.resource_id"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.http_endpoint_id", resourceName, "apm.0.http_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.https_endpoint_id", 
resourceName, "apm.0.https_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.topology.0.instance_configuration_id", resourceName, "apm.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.topology.0.size", resourceName, "apm.0.topology.0.size"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.topology.0.size_resource", resourceName, "apm.0.topology.0.size_resource"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.topology.0.zone_count", resourceName, "apm.0.topology.0.zone_count"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.elasticsearch_cluster_ref_id", resourceName, "apm.elasticsearch_cluster_ref_id"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.ref_id", resourceName, "apm.ref_id"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.cloud_id", resourceName, "apm.cloud_id"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.resource_id", resourceName, "apm.resource_id"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.http_endpoint_id", resourceName, "apm.http_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.https_endpoint_id", resourceName, "apm.https_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.topology.0.instance_configuration_id", resourceName, "apm.instance_configuration_id"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.topology.0.size", resourceName, "apm.size"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.topology.0.size_resource", resourceName, "apm.size_resource"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.topology.0.zone_count", resourceName, "apm.zone_count"), // Enterprise Search - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.elasticsearch_cluster_ref_id", resourceName, "enterprise_search.0.elasticsearch_cluster_ref_id"), - 
resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.ref_id", resourceName, "enterprise_search.0.ref_id"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.cloud_id", resourceName, "enterprise_search.0.cloud_id"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.resource_id", resourceName, "enterprise_search.0.resource_id"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.http_endpoint_id", resourceName, "enterprise_search.0.http_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.https_endpoint_id", resourceName, "enterprise_search.0.https_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.instance_configuration_id", resourceName, "enterprise_search.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.size", resourceName, "enterprise_search.0.topology.0.size"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.size_resource", resourceName, "enterprise_search.0.topology.0.size_resource"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.zone_count", resourceName, "enterprise_search.0.topology.0.zone_count"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.node_type_appserver", resourceName, "enterprise_search.0.topology.0.node_type_appserver"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.node_type_connector", resourceName, "enterprise_search.0.topology.0.node_type_connector"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.node_type_worker", resourceName, "enterprise_search.0.topology.0.node_type_worker"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.elasticsearch_cluster_ref_id", resourceName, 
"enterprise_search.elasticsearch_cluster_ref_id"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.ref_id", resourceName, "enterprise_search.ref_id"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.cloud_id", resourceName, "enterprise_search.cloud_id"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.resource_id", resourceName, "enterprise_search.resource_id"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.http_endpoint_id", resourceName, "enterprise_search.http_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.https_endpoint_id", resourceName, "enterprise_search.https_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.instance_configuration_id", resourceName, "enterprise_search.instance_configuration_id"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.size", resourceName, "enterprise_search.size"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.size_resource", resourceName, "enterprise_search.size_resource"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.zone_count", resourceName, "enterprise_search.zone_count"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.node_type_appserver", resourceName, "enterprise_search.node_type_appserver"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.node_type_connector", resourceName, "enterprise_search.node_type_connector"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.node_type_worker", resourceName, "enterprise_search.node_type_worker"), ), }, { @@ -114,16 +114,16 @@ func TestAccDatasourceDeployment_basic(t *testing.T) { resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.alias", 
resourceName, "alias"), // Query results - resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.elasticsearch_resource_id", resourceName, "elasticsearch.0.resource_id"), - resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.kibana_resource_id", resourceName, "kibana.0.resource_id"), - resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.apm_resource_id", resourceName, "apm.0.resource_id"), - resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.enterprise_search_resource_id", resourceName, "enterprise_search.0.resource_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.elasticsearch_resource_id", resourceName, "elasticsearch.resource_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.kibana_resource_id", resourceName, "kibana.resource_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.apm_resource_id", resourceName, "apm.resource_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.enterprise_search_resource_id", resourceName, "enterprise_search.resource_id"), // Ref ID check. 
- resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.elasticsearch_ref_id", resourceName, "elasticsearch.0.ref_id"), - resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.kibana_ref_id", resourceName, "kibana.0.ref_id"), - resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.apm_ref_id", resourceName, "apm.0.ref_id"), - resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.enterprise_search_ref_id", resourceName, "enterprise_search.0.ref_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.elasticsearch_ref_id", resourceName, "elasticsearch.ref_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.kibana_ref_id", resourceName, "kibana.ref_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.apm_ref_id", resourceName, "apm.ref_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.enterprise_search_ref_id", resourceName, "enterprise_search.ref_id"), ), }, }, diff --git a/ec/acc/deployment_autoscaling_test.go b/ec/acc/deployment_autoscaling_test.go index 1b93ea8de..d4c0246d9 100644 --- a/ec/acc/deployment_autoscaling_test.go +++ b/ec/acc/deployment_autoscaling_test.go @@ -44,81 +44,60 @@ func TestAccDeployment_autoscaling(t *testing.T) { { Config: cfgF(startCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.autoscale", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "5"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.autoscale", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "cold"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "0g"), - 
resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.autoscaling.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.autoscaling.0.max_size", "58g"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.cold.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.cold.size", "0g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.cold.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.cold.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.cold.autoscaling.max_size", "58g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "frozen"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "0g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.autoscaling.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.autoscaling.0.max_size", "120g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.frozen.size", "0g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.frozen.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.frozen.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.frozen.autoscaling.max_size", "120g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.size_resource", "memory"), - 
resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.autoscaling.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.autoscaling.0.max_size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.autoscaling.max_size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.id", "ml"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.autoscaling.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.autoscaling.0.max_size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.autoscaling.0.min_size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.ml.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.ml.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.ml.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.ml.autoscaling.max_size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.ml.autoscaling.min_size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.4.id", "warm"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.4.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.4.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, 
"elasticsearch.0.topology.4.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.4.autoscaling.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.4.autoscaling.0.max_size", "15g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.autoscaling.max_size", "15g"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, // also disables ML { Config: cfgF(disableAutoscale), - // When disabling a tier the plan will be non empty on refresh - // since the topology block is present with size = "0g". 
- ExpectNonEmptyPlan: true, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.autoscale", "false"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "2"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.autoscale", "false"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.autoscaling.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.autoscaling.0.max_size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.autoscaling.max_size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "warm"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.autoscaling.#", "1"), - resource.TestCheckResourceAttr(resName, 
"elasticsearch.0.topology.1.autoscaling.0.max_size", "15g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.autoscaling.max_size", "15g"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_basic_defaults_test.go b/ec/acc/deployment_basic_defaults_test.go index 5aa745471..0b2f724b6 100644 --- a/ec/acc/deployment_basic_defaults_test.go +++ b/ec/acc/deployment_basic_defaults_test.go @@ -31,7 +31,7 @@ import ( // * Resource defaults. // * Resource declaration in the {} format. ("apm {}"). // * Topology field overrides over field defaults. -func TestAccDeployment_basic_defaults(t *testing.T) { +func TestAccDeployment_basic_defaults_first(t *testing.T) { resName := "ec_deployment.defaults" randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) startCfg := "testdata/deployment_basic_defaults_1.tf" @@ -51,31 +51,20 @@ func TestAccDeployment_basic_defaults(t *testing.T) { // Checks the defaults which are populated using a mix of // Deployment Template and schema defaults. 
Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "enterprise_search.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size", "2g"), - 
resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckResourceAttrSet(resName, "enterprise_search.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size", "2g"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "enterprise_search.zone_count", "1"), ), }, { @@ -83,69 +72,45 @@ func TestAccDeployment_basic_defaults(t *testing.T) { Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( // changed - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size", "1g"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, 
"elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.#", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "apm.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "enterprise_search.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, 
"enterprise_search.0.topology.0.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.size", "2g"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "apm.size", "1g"), + resource.TestCheckResourceAttrSet(resName, "apm.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "apm.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "apm.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "enterprise_search.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size", "2g"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "enterprise_search.zone_count", "1"), ), }, { // Remove all resources except Elasticsearch and Kibana and set a node type override Config: thirdConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - 
resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), // In this test we're verifying that the topology for Kibana is not reset. // This is due to the terraform SDK stickyness where a removed computed block // with a previous value is the same as an empty block, so previous computed // values are used. 
- resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "2g"), - - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttr(resName, "kibana.size", "2g"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, @@ -169,21 +134,14 @@ func TestAccDeployment_basic_defaults_hw(t *testing.T) { Config: cfg, // Create a deployment which only uses Elasticsearch resources Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", 
"0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { @@ -191,37 +149,23 @@ func TestAccDeployment_basic_defaults_hw(t *testing.T) { // hot warm, use defaults. Config: hotWarmCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "2"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.instance_configuration_id"), // Hot Warm defaults to 4g. 
- resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "warm"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, 
"kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_basic_tags_test.go b/ec/acc/deployment_basic_tags_test.go index e74719be2..43dfae2bf 100644 --- a/ec/acc/deployment_basic_tags_test.go +++ b/ec/acc/deployment_basic_tags_test.go @@ -50,21 +50,14 @@ func TestAccDeployment_basic_tags(t *testing.T) { // Create a deployment with tags. 
Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), // Tags resource.TestCheckResourceAttr(resName, "tags.%", "2"), @@ -76,21 +69,14 @@ func 
TestAccDeployment_basic_tags(t *testing.T) { // Remove a tag. Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), // Tags 
resource.TestCheckResourceAttr(resName, "tags.%", "1"), @@ -101,21 +87,14 @@ func TestAccDeployment_basic_tags(t *testing.T) { // Remove the tags block. Config: thirdConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, 
"apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), // Tags resource.TestCheckResourceAttr(resName, "tags.%", "0"), @@ -125,21 +104,14 @@ func TestAccDeployment_basic_tags(t *testing.T) { // Add the tags block with a single tag. Config: fourthConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + 
resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), // Tags resource.TestCheckResourceAttr(resName, "tags.%", "1"), diff --git a/ec/acc/deployment_basic_test.go b/ec/acc/deployment_basic_test.go index 42e35d401..d39415191 100644 --- a/ec/acc/deployment_basic_test.go +++ b/ec/acc/deployment_basic_test.go @@ -50,19 +50,18 @@ func TestAccDeployment_basic_tf(t *testing.T) { Config: cfg, Check: checkBasicDeploymentResource(resName, randomName, deploymentVersion, resource.TestCheckResourceAttr(resName, "alias", randomAlias), - resource.TestCheckResourceAttr(resName, "apm.0.config.#", "0"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.config.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.config.#", "0"), - resource.TestCheckResourceAttr(resName, "traffic_filter.#", "0"), + resource.TestCheckNoResourceAttr(resName, "apm.config"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search.config"), + resource.TestCheckNoResourceAttr(resName, "traffic_filter"), // Ensure at least 1 account is trusted (self). - resource.TestCheckResourceAttr(resName, "elasticsearch.0.trust_account.#", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.trust_account.#", "1"), ), }, { Config: cfgWithTrafficFilter, Check: checkBasicDeploymentResource(resName, randomName, deploymentVersion, // Ensure at least 1 account is trusted (self). It isn't deleted. 
- resource.TestCheckResourceAttr(resName, "elasticsearch.0.trust_account.#", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.trust_account.#", "1"), resource.TestCheckResourceAttr(resName, "traffic_filter.#", "1"), ), }, @@ -76,9 +75,7 @@ func TestAccDeployment_basic_tf(t *testing.T) { { Config: cfg, Check: checkBasicDeploymentResource(resName, randomName, deploymentVersion, - resource.TestCheckResourceAttr(resName, "elasticsearch.0.config.#", "0"), resource.TestCheckResourceAttr(resName, "traffic_filter.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.0.config.#", "0"), ), }, }, @@ -88,9 +85,9 @@ func TestAccDeployment_basic_tf(t *testing.T) { func TestAccDeployment_basic_config(t *testing.T) { resName := "ec_deployment.basic" randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - startCfg := "testdata/deployment_basic_settings_config_1.tf" + importCfg := "testdata/deployment_basic_settings_config_import.tf" settingsConfig := "testdata/deployment_basic_settings_config_2.tf" - cfg := fixtureAccDeploymentResourceBasicWithApps(t, startCfg, randomName, getRegion(), defaultTemplate) + cfg := fixtureAccDeploymentResourceBasicWithApps(t, importCfg, randomName, getRegion(), defaultTemplate) settingsConfigCfg := fixtureAccDeploymentResourceBasicWithApps(t, settingsConfig, randomName, getRegion(), defaultTemplate) deploymentVersion, err := latestStackVersion() if err != nil { @@ -105,24 +102,20 @@ func TestAccDeployment_basic_config(t *testing.T) { { Config: settingsConfigCfg, Check: checkBasicDeploymentResource(resName, randomName, deploymentVersion, - resource.TestCheckResourceAttr(resName, "elasticsearch.0.config.0.user_settings_yaml", "action.auto_create_index: true"), - resource.TestCheckResourceAttr(resName, "apm.0.config.0.debug_enabled", "true"), - resource.TestCheckResourceAttr(resName, "apm.0.config.0.user_settings_json", `{"apm-server.rum.enabled":true}`), - resource.TestCheckResourceAttr(resName, 
"kibana.0.config.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.config.0.user_settings_yaml", "csp.warnLegacyBrowsers: true"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.config.#", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.config.0.user_settings_yaml", "# comment"), + resource.TestCheckResourceAttr(resName, "elasticsearch.config.user_settings_yaml", "action.auto_create_index: true"), + resource.TestCheckResourceAttr(resName, "apm.config.debug_enabled", "true"), + resource.TestCheckResourceAttr(resName, "apm.config.user_settings_json", `{"apm-server.rum.enabled":true}`), + resource.TestCheckResourceAttr(resName, "kibana.config.user_settings_yaml", "csp.warnLegacyBrowsers: true"), + resource.TestCheckResourceAttr(resName, "enterprise_search.config.user_settings_yaml", "# comment"), ), }, { Config: cfg, Check: checkBasicDeploymentResource(resName, randomName, deploymentVersion, - resource.TestCheckResourceAttr(resName, "apm.0.config.#", "1"), - // The config block is unset in the configuration so it disappears from the state. 
- resource.TestCheckResourceAttr(resName, "elasticsearch.0.config.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.0.config.0.debug_enabled", "false"), - resource.TestCheckResourceAttr(resName, "kibana.0.config.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.config.#", "0"), + resource.TestCheckResourceAttr(resName, "apm.config.%", "0"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.config.user_settings_yaml"), + resource.TestCheckResourceAttr(resName, "kibana.config.%", "0"), + resource.TestCheckResourceAttr(resName, "enterprise_search.config.%", "0"), ), }, // Import resource without complex ID @@ -194,32 +187,28 @@ func checkBasicDeploymentResource(resName, randomDeploymentName, deploymentVersi testAccCheckDeploymentExists(resName), resource.TestCheckResourceAttr(resName, "name", randomDeploymentName), resource.TestCheckResourceAttr(resName, "region", getRegion()), - resource.TestCheckResourceAttr(resName, "apm.#", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.region", getRegion()), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "apm.region", getRegion()), + resource.TestCheckResourceAttr(resName, "apm.size", "1g"), + resource.TestCheckResourceAttr(resName, "apm.size_resource", "memory"), resource.TestCheckResourceAttrSet(resName, "apm_secret_token"), resource.TestCheckResourceAttrSet(resName, "elasticsearch_username"), resource.TestCheckResourceAttrSet(resName, "elasticsearch_password"), - resource.TestCheckResourceAttrSet(resName, "apm.0.http_endpoint"), - resource.TestCheckResourceAttrSet(resName, "apm.0.https_endpoint"), - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.region", getRegion()), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - 
resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.http_endpoint"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.https_endpoint"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.region", getRegion()), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.http_endpoint"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.https_endpoint"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.region", getRegion()), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "enterprise_search.0.http_endpoint"), - resource.TestCheckResourceAttrSet(resName, "enterprise_search.0.https_endpoint"), + resource.TestCheckResourceAttrSet(resName, "apm.http_endpoint"), + resource.TestCheckResourceAttrSet(resName, "apm.https_endpoint"), + resource.TestCheckResourceAttr(resName, "elasticsearch.region", getRegion()), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.http_endpoint"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.https_endpoint"), + resource.TestCheckResourceAttr(resName, "kibana.region", getRegion()), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + 
resource.TestCheckResourceAttrSet(resName, "kibana.http_endpoint"), + resource.TestCheckResourceAttrSet(resName, "kibana.https_endpoint"), + resource.TestCheckResourceAttr(resName, "enterprise_search.region", getRegion()), + resource.TestCheckResourceAttr(resName, "enterprise_search.size", "2g"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "enterprise_search.http_endpoint"), + resource.TestCheckResourceAttrSet(resName, "enterprise_search.https_endpoint"), }, checks...)...) } diff --git a/ec/acc/deployment_ccs_test.go b/ec/acc/deployment_ccs_test.go index 0edd4ffd5..d7f41418f 100644 --- a/ec/acc/deployment_ccs_test.go +++ b/ec/acc/deployment_ccs_test.go @@ -53,50 +53,43 @@ func TestAccDeployment_ccs(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( // CCS Checks - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.0.topology.0.instance_configuration_id"), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.hot.instance_configuration_id"), // CCS defaults to 1g. 
- resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.size_resource", "memory"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.hot.size", "1g"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.hot.size_resource", "memory"), // Remote cluster settings - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.remote_cluster.#", "3"), - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.0.remote_cluster.0.deployment_id"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.remote_cluster.0.alias", fmt.Sprint(sourceRandomName, "-0")), - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.0.remote_cluster.1.deployment_id"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.remote_cluster.1.alias", fmt.Sprint(sourceRandomName, "-1")), - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.0.remote_cluster.2.deployment_id"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.remote_cluster.2.alias", fmt.Sprint(sourceRandomName, "-2")), - - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(ccsResName, "kibana.#", "0"), - resource.TestCheckResourceAttr(ccsResName, "apm.#", "0"), - resource.TestCheckResourceAttr(ccsResName, "enterprise_search.#", "0"), - 
+ resource.TestCheckResourceAttr(ccsResName, "elasticsearch.remote_cluster.#", "3"), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.remote_cluster.0.deployment_id"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.remote_cluster.0.alias", fmt.Sprint(sourceRandomName, "-0")), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.remote_cluster.1.deployment_id"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.remote_cluster.1.alias", fmt.Sprint(sourceRandomName, "-1")), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.remote_cluster.2.deployment_id"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.remote_cluster.2.alias", fmt.Sprint(sourceRandomName, "-2")), + + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_data"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_ingest"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_master"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_ml"), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.hot.zone_count", "1"), + resource.TestCheckNoResourceAttr(sourceResName, "kibana"), + resource.TestCheckNoResourceAttr(sourceResName, "apm"), + resource.TestCheckNoResourceAttr(sourceResName, "enterprise_search"), // Source Checks - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(sourceResName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(sourceResName, 
"elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(sourceResName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(sourceResName, "kibana.#", "0"), - resource.TestCheckResourceAttr(sourceResName, "apm.#", "0"), - resource.TestCheckResourceAttr(sourceResName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(sourceResName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(sourceResName, "elasticsearch.hot.size", "1g"), + resource.TestCheckResourceAttr(sourceResName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_data"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_ingest"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_master"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_ml"), + resource.TestCheckResourceAttrSet(sourceResName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(sourceResName, "elasticsearch.hot.zone_count", "1"), + resource.TestCheckNoResourceAttr(sourceResName, "kibana"), + resource.TestCheckNoResourceAttr(sourceResName, "apm"), + resource.TestCheckNoResourceAttr(sourceResName, "enterprise_search"), ), }, { @@ -104,29 +97,25 @@ func TestAccDeployment_ccs(t *testing.T) { Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( // Changes. 
- resource.TestCheckResourceAttr(ccsResName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.size_resource", "memory"), - - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.remote_cluster.#", "0"), - - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(ccsResName, "kibana.#", "1"), - resource.TestCheckResourceAttr(ccsResName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(ccsResName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(ccsResName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(ccsResName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(ccsResName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(ccsResName, "apm.#", "0"), - resource.TestCheckResourceAttr(ccsResName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.hot.size", "2g"), + 
resource.TestCheckResourceAttr(ccsResName, "elasticsearch.hot.size_resource", "memory"), + + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.remote_cluster.#", "0"), + + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_data"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_ingest"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_master"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_ml"), + + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.hot.zone_count", "1"), + resource.TestCheckResourceAttr(ccsResName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(ccsResName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(ccsResName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(ccsResName, "kibana.size_resource", "memory"), + resource.TestCheckNoResourceAttr(ccsResName, "apm"), + resource.TestCheckNoResourceAttr(ccsResName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_compute_optimized_test.go b/ec/acc/deployment_compute_optimized_test.go index 88b97884e..f130bf016 100644 --- a/ec/acc/deployment_compute_optimized_test.go +++ b/ec/acc/deployment_compute_optimized_test.go @@ -41,57 +41,37 @@ func TestAccDeployment_computeOptimized(t *testing.T) { // Create a Compute Optimized deployment with the default settings. 
Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + 
resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { // Change the Elasticsearch topology size and add APM instance. Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - 
resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "apm.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "apm.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "apm.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "apm.size", "1g"), + resource.TestCheckResourceAttr(resName, "apm.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_dedicated_test.go b/ec/acc/deployment_dedicated_test.go index 2ca44dfe1..73802fa5b 100644 --- 
a/ec/acc/deployment_dedicated_test.go +++ b/ec/acc/deployment_dedicated_test.go @@ -39,32 +39,27 @@ func TestAccDeployment_dedicated_coordinating(t *testing.T) { // Create a deployment with dedicated coordinating. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "3"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.instance_configuration_id"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "coordinating"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.id", "warm"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.2.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.size_resource", "memory"), - - 
resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + + resource.TestCheckResourceAttrSet(resName, "elasticsearch.coordinating.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.coordinating.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.coordinating.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "elasticsearch.coordinating.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.coordinating.size_resource", "memory"), + + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + + resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), + + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, @@ -86,40 +81,34 @@ func TestAccDeployment_dedicated_master(t *testing.T) { // Create a deployment with dedicated master nodes. 
Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "4"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.2.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.3.instance_configuration_id"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "cold"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.zone_count", "3"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.id", "master"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.2.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.zone_count", "3"), - - 
resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.id", "warm"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.3.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.zone_count", "2"), - - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.cold.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.master.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.instance_configuration_id"), + + resource.TestCheckResourceAttr(resName, "elasticsearch.cold.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.cold.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.cold.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.cold.zone_count", "1"), + + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "3"), + + resource.TestCheckResourceAttr(resName, "elasticsearch.master.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.master.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.master.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.master.zone_count", "3"), 
+ + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "2"), + + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_docker_image_override_test.go b/ec/acc/deployment_docker_image_override_test.go index 7ef00c38b..be8144739 100644 --- a/ec/acc/deployment_docker_image_override_test.go +++ b/ec/acc/deployment_docker_image_override_test.go @@ -51,10 +51,10 @@ func TestAccDeployment_docker_image_override(t *testing.T) { { Config: cfgF("testdata/deployment_docker_image_override.tf"), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.0.config.0.docker_image", "docker.elastic.co/cloud-ci/elasticsearch:7.15.0-SNAPSHOT"), - resource.TestCheckResourceAttr(resName, "kibana.0.config.0.docker_image", "docker.elastic.co/cloud-ci/kibana:7.15.0-SNAPSHOT"), - resource.TestCheckResourceAttr(resName, "apm.0.config.0.docker_image", "docker.elastic.co/cloud-ci/apm:7.15.0-SNAPSHOT"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.config.0.docker_image", "docker.elastic.co/cloud-ci/enterprise-search:7.15.0-SNAPSHOT"), + resource.TestCheckResourceAttr(resName, "elasticsearch.config.docker_image", "docker.elastic.co/cloud-ci/elasticsearch:7.15.0-SNAPSHOT"), + resource.TestCheckResourceAttr(resName, "kibana.config.docker_image", "docker.elastic.co/cloud-ci/kibana:7.15.0-SNAPSHOT"), + resource.TestCheckResourceAttr(resName, "apm.config.docker_image", "docker.elastic.co/cloud-ci/apm:7.15.0-SNAPSHOT"), + resource.TestCheckResourceAttr(resName, "enterprise_search.config.docker_image", 
"docker.elastic.co/cloud-ci/enterprise-search:7.15.0-SNAPSHOT"), ), }, }, diff --git a/ec/acc/deployment_elasticsearch_kesytore_test.go b/ec/acc/deployment_elasticsearch_keystore_test.go similarity index 98% rename from ec/acc/deployment_elasticsearch_kesytore_test.go rename to ec/acc/deployment_elasticsearch_keystore_test.go index 9b15d7524..4ab06ad71 100644 --- a/ec/acc/deployment_elasticsearch_kesytore_test.go +++ b/ec/acc/deployment_elasticsearch_keystore_test.go @@ -123,7 +123,8 @@ func TestAccDeploymentElasticsearchKeystore_UpgradeFrom0_4_1(t *testing.T) { firstResName := resType + ".test" secondResName := resType + ".gcs_creds" randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - startCfg := "testdata/deployment_elasticsearch_keystore_1.tf" + startCfg := "testdata/deployment_elasticsearch_keystore_1_041.tf" + migratedCfg := "testdata/deployment_elasticsearch_keystore_1_migrated.tf" cfgF := func(cfg string) string { return fixtureAccDeploymentResourceBasic( @@ -156,7 +157,7 @@ func TestAccDeploymentElasticsearchKeystore_UpgradeFrom0_4_1(t *testing.T) { { PlanOnly: true, ProtoV6ProviderFactories: testAccProviderFactory, - Config: cfgF(startCfg), + Config: cfgF(migratedCfg), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(firstResName, "setting_name", "xpack.notification.slack.account.hello.secure_url"), resource.TestCheckResourceAttr(firstResName, "value", "hella"), diff --git a/ec/acc/deployment_emptyconf_test.go b/ec/acc/deployment_emptyconf_test.go index e9532d6ae..06888615f 100644 --- a/ec/acc/deployment_emptyconf_test.go +++ b/ec/acc/deployment_emptyconf_test.go @@ -43,14 +43,10 @@ func TestAccDeployment_emptyconfig(t *testing.T) { { Config: cfgF(startCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.config.#", "0"), + // config has 6 attributes + 
resource.TestCheckResourceAttr(resName, "elasticsearch.config.%", "6"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.config.user_settings_yaml"), ), - // Since the configuration specifies a `config {}` block but - // the setting itself is `null`, the config {} block will be - // set to empty and will cause the plan to always have a diff: - // + config {}. - ExpectNonEmptyPlan: true, }, }, }) diff --git a/ec/acc/deployment_enterprise_search_test.go b/ec/acc/deployment_enterprise_search_test.go index 3570b2485..709955118 100644 --- a/ec/acc/deployment_enterprise_search_test.go +++ b/ec/acc/deployment_enterprise_search_test.go @@ -41,62 +41,40 @@ func TestAccDeployment_enterpriseSearch(t *testing.T) { // Create an Enterprise Search deployment with the default settings. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - 
resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "enterprise_search.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckResourceAttr(resName, "enterprise_search.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "enterprise_search.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, 
"enterprise_search.size", "2g"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size_resource", "memory"), ), }, { // Change the Elasticsearch topology size. Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "1"), - resource.TestCheckResourceAttr(resName, 
"enterprise_search.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "enterprise_search.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckResourceAttr(resName, "enterprise_search.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "enterprise_search.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size", "2g"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size_resource", "memory"), ), }, }, diff --git a/ec/acc/deployment_failed_upgrade_retry_test.go b/ec/acc/deployment_failed_upgrade_retry_test.go index 318c5a74c..9334cc706 100644 --- a/ec/acc/deployment_failed_upgrade_retry_test.go +++ b/ec/acc/deployment_failed_upgrade_retry_test.go @@ -47,7 +47,7 @@ func TestAccDeployment_failed_upgrade_retry(t *testing.T) { // Creates an Elasticsearch index that will make the kibana upgrade fail. 
PreConfig: createIndex(t, &esCreds, ".kibana_2"), Config: fixtureDeploymentDefaults(t, "testdata/deployment_upgrade_retry_2.tf"), - ExpectError: regexp.MustCompile(`\[kibana\].*Plan change failed.*`), + ExpectError: regexp.MustCompile(`\[kibana\].*Plan[ \t\n]+change[ \t\n]+failed.*`), Check: resource.ComposeAggregateTestCheckFunc( checkMajorMinorVersion(t, resName, 7, 10), ), diff --git a/ec/acc/deployment_hotwarm_test.go b/ec/acc/deployment_hotwarm_test.go index 3ca41cf9d..9a72248b8 100644 --- a/ec/acc/deployment_hotwarm_test.go +++ b/ec/acc/deployment_hotwarm_test.go @@ -46,33 +46,21 @@ func TestAccDeployment_hotwarm(t *testing.T) { // Create a Hot / Warm deployment with the default settings. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "2"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.instance_configuration_id"), // Hot Warm defaults to 4g. 
- resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "warm"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.zone_count", "2"), - 
resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "2"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { @@ -80,32 +68,20 @@ func TestAccDeployment_hotwarm(t *testing.T) { Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( // Changes. - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, 
"elasticsearch.0.topology.#", "2"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "warm"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.node_roles.#"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.node_roles.#"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + 
resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_integrations_server_test.go b/ec/acc/deployment_integrations_server_test.go index 8b000869d..f9346fbda 100644 --- a/ec/acc/deployment_integrations_server_test.go +++ b/ec/acc/deployment_integrations_server_test.go @@ -41,34 +41,22 @@ func TestAccDeployment_integrationsServer(t *testing.T) { // Create an Integrations Server deployment with the default settings. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "integrations_server.#", "1"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttrSet(resName, "integrations_server.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.0.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "integrations_server.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "integrations_server.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "integrations_server.size", "1g"), + resource.TestCheckResourceAttr(resName, "integrations_server.size_resource", "memory"), ), }, { // Change the Integrations Server topology (increase zone count to 2). 
Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "integrations_server.#", "1"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttrSet(resName, "integrations_server.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.0.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "integrations_server.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "integrations_server.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "integrations_server.size", "1g"), + resource.TestCheckResourceAttr(resName, "integrations_server.size_resource", "memory"), ), }, }, diff --git a/ec/acc/deployment_memory_optimized_test.go b/ec/acc/deployment_memory_optimized_test.go index d2d2be8ab..b527a9921 100644 --- a/ec/acc/deployment_memory_optimized_test.go +++ b/ec/acc/deployment_memory_optimized_test.go @@ -41,57 +41,37 @@ func TestAccDeployment_memoryOptimized(t *testing.T) { // Create a Memory Optimized deployment with the default settings. 
Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + 
resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { // Change the Elasticsearch topology size and add APM instance. Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - 
resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "apm.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "apm.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "apm.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "apm.size", "1g"), + resource.TestCheckResourceAttr(resName, "apm.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_observability_self_test.go b/ec/acc/deployment_observability_self_test.go index 7b6c9a07c..b326c0cc2 100644 --- 
a/ec/acc/deployment_observability_self_test.go +++ b/ec/acc/deployment_observability_self_test.go @@ -42,9 +42,9 @@ func TestAccDeployment_observability_createWithSelfObservability(t *testing.T) { // After creation, the target-deployment-id should be the id of the created deployment Config: config, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrPair(resName, "observability.0.deployment_id", resName, "id"), - resource.TestCheckResourceAttr(resName, "observability.0.metrics", "true"), - resource.TestCheckResourceAttr(resName, "observability.0.logs", "true"), + resource.TestCheckResourceAttr(resName, "observability.deployment_id", "self"), + resource.TestCheckResourceAttr(resName, "observability.metrics", "true"), + resource.TestCheckResourceAttr(resName, "observability.logs", "true"), ), }, }, diff --git a/ec/acc/deployment_observability_test.go b/ec/acc/deployment_observability_test.go index 75e6aaa3e..ee9464a71 100644 --- a/ec/acc/deployment_observability_test.go +++ b/ec/acc/deployment_observability_test.go @@ -26,7 +26,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) -func TestAccDeployment_observability(t *testing.T) { +func TestAccDeployment_observability_first(t *testing.T) { resName := "ec_deployment.observability" secondResName := "ec_deployment.basic" randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) @@ -47,35 +47,34 @@ func TestAccDeployment_observability(t *testing.T) { { Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrPair(resName, "observability.0.deployment_id", secondResName, "id"), - resource.TestCheckResourceAttr(resName, "observability.0.metrics", "true"), - resource.TestCheckResourceAttr(resName, "observability.0.logs", "true"), + resource.TestCheckResourceAttrPair(resName, "observability.deployment_id", secondResName, "id"), + resource.TestCheckResourceAttr(resName, "observability.metrics", "true"), + 
resource.TestCheckResourceAttr(resName, "observability.logs", "true"), ), }, { Config: secondCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrPair(resName, "observability.0.deployment_id", secondResName, "id"), - resource.TestCheckResourceAttr(resName, "observability.0.metrics", "false"), - resource.TestCheckResourceAttr(resName, "observability.0.logs", "true"), + resource.TestCheckResourceAttrPair(resName, "observability.deployment_id", secondResName, "id"), + resource.TestCheckResourceAttr(resName, "observability.metrics", "false"), + resource.TestCheckResourceAttr(resName, "observability.logs", "true"), ), }, { Config: thirdCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrPair(resName, "observability.0.deployment_id", secondResName, "id"), - resource.TestCheckResourceAttr(resName, "observability.0.metrics", "true"), - resource.TestCheckResourceAttr(resName, "observability.0.logs", "false"), + resource.TestCheckResourceAttrPair(resName, "observability.deployment_id", secondResName, "id"), + resource.TestCheckResourceAttr(resName, "observability.metrics", "true"), + resource.TestCheckResourceAttr(resName, "observability.logs", "false"), ), }, { Config: fourthCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "observability.#", "0"), - resource.TestCheckNoResourceAttr(resName, "observability.0.deployment_id"), - resource.TestCheckNoResourceAttr(resName, "observability.0.metrics"), - resource.TestCheckNoResourceAttr(resName, "observability.0.logs"), - resource.TestCheckNoResourceAttr(resName, "observability.0.ref_id"), + resource.TestCheckNoResourceAttr(resName, "observability.deployment_id"), + resource.TestCheckNoResourceAttr(resName, "observability.metrics"), + resource.TestCheckNoResourceAttr(resName, "observability.logs"), + resource.TestCheckNoResourceAttr(resName, "observability.ref_id"), ), }, }, diff --git 
a/ec/acc/deployment_observability_tpl_test.go b/ec/acc/deployment_observability_tpl_test.go index dbcebb6ab..6acd74019 100644 --- a/ec/acc/deployment_observability_tpl_test.go +++ b/ec/acc/deployment_observability_tpl_test.go @@ -41,60 +41,39 @@ func TestAccDeployment_observabilityTpl(t *testing.T) { // Create an Observability deployment with the default settings. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - 
resource.TestCheckResourceAttr(resName, "apm.#", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "apm.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "apm.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "apm.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "apm.size", "1g"), + resource.TestCheckResourceAttr(resName, "apm.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { // Change the Elasticsearch topology size. 
Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "apm.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + 
resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "apm.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "apm.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "apm.size", "1g"), + resource.TestCheckResourceAttr(resName, "apm.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_post_node_role_upgrade_test.go b/ec/acc/deployment_post_node_role_upgrade_test.go index 01caca25a..a1baf55fa 100644 --- a/ec/acc/deployment_post_node_role_upgrade_test.go +++ b/ec/acc/deployment_post_node_role_upgrade_test.go @@ -44,41 +44,39 @@ func TestAccDeployment_post_node_roles(t *testing.T) { { Config: cfgF(startCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - 
resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), + + resource.TestCheckNoResourceAttr(resName, "elasticsearch.hot.node_type_data"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.hot.node_type_ingest"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.hot.node_type_master"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.hot.node_type_ml"), + + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { Config: cfgF(upgradeVersionCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), -
resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), + + resource.TestCheckNoResourceAttr(resName, "elasticsearch.hot.node_type_data"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.hot.node_type_ingest"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.hot.node_type_master"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.hot.node_type_ml"), + + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_pre_node_role_migration_test.go b/ec/acc/deployment_pre_node_role_migration_test.go index ea1b0b61a..594cef2c1 100644 ---
a/ec/acc/deployment_pre_node_role_migration_test.go +++ b/ec/acc/deployment_pre_node_role_migration_test.go @@ -45,76 +45,69 @@ func TestAccDeployment_pre_node_roles(t *testing.T) { { Config: cfgF(startCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", "false"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_roles.#", "0"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_data", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_ingest", "true"), + 
resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_master", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_ml", "false"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_roles.#", "0"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), + + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { Config: cfgF(upgradeVersionCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", "false"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_roles.#", "0"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + 
resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_data", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_ingest", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_master", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_ml", "false"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_roles.#", "0"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), + + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { Config: cfgF(addWarmTopologyCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "2"), - // Hot - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_roles.#", "0"), - resource.TestCheckResourceAttr(resName, 
"elasticsearch.0.topology.0.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), + + resource.TestCheckNoResourceAttr(resName, "elasticsearch.hot.node_type_data"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.hot.node_type_ingest"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.hot.node_type_master"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.hot.node_type_ml"), // Warm - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "warm"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName,
"elasticsearch.warm.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "1"), + + resource.TestCheckNoResourceAttr(resName, "elasticsearch.warm.node_type_data"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.warm.node_type_ingest"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.warm.node_type_master"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.warm.node_type_ml"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_security_test.go b/ec/acc/deployment_security_test.go index aa015d3f1..b0ec77994 100644 --- a/ec/acc/deployment_security_test.go +++ b/ec/acc/deployment_security_test.go @@ -41,57 +41,37 @@ func TestAccDeployment_security(t *testing.T) { // Create a Security deployment with the default settings.
Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + 
resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { // Change the Elasticsearch topology size and add APM instance. Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - 
resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "apm.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "apm.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "apm.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "apm.size", "1g"), + resource.TestCheckResourceAttr(resName, "apm.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_snapshot_test.go b/ec/acc/deployment_snapshot_test.go index f8b91d78b..1a9adc493 100644 --- 
a/ec/acc/deployment_snapshot_test.go +++ b/ec/acc/deployment_snapshot_test.go @@ -80,7 +80,7 @@ func readEsCredentials(t *testing.T, esCreds *creds) resource.TestCheckFunc { continue } - esCreds.URL = rs.Primary.Attributes["elasticsearch.0.https_endpoint"] + esCreds.URL = rs.Primary.Attributes["elasticsearch.https_endpoint"] esCreds.User = rs.Primary.Attributes["elasticsearch_username"] esCreds.Pass = rs.Primary.Attributes["elasticsearch_password"] } diff --git a/ec/acc/deployment_traffic_filter_association_test.go b/ec/acc/deployment_traffic_filter_association_test.go index 05e0b0010..ac102c260 100644 --- a/ec/acc/deployment_traffic_filter_association_test.go +++ b/ec/acc/deployment_traffic_filter_association_test.go @@ -76,7 +76,7 @@ func TestAccDeploymentTrafficFilterAssociation_UpgradeFrom0_4_1(t *testing.T) { resName := "ec_deployment_traffic_filter.tf_assoc" resAssocName := "ec_deployment_traffic_filter_association.tf_assoc" randomName := acctest.RandomWithPrefix(prefix) - startCfg := "testdata/deployment_traffic_filter_association_basic.tf" + startCfg := "testdata/deployment_traffic_filter_association_basic_041.tf" ignoreChangesCfgFile := "testdata/deployment_traffic_filter_association_basic_ignore_changes.tf" cfg := fixtureAccDeploymentTrafficFilterResourceAssociationBasic(t, startCfg, randomName, getRegion(), defaultTemplate) ignoreChangesCfg := fixtureAccDeploymentTrafficFilterResourceAssociationBasic(t, ignoreChangesCfgFile, randomName, getRegion(), defaultTemplate) diff --git a/ec/acc/deployment_with_extension_bundle_test.go b/ec/acc/deployment_with_extension_bundle_test.go index f6b3174ac..dabfdc618 100644 --- a/ec/acc/deployment_with_extension_bundle_test.go +++ b/ec/acc/deployment_with_extension_bundle_test.go @@ -36,7 +36,10 @@ func TestAccDeployment_withExtension(t *testing.T) { randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) filePath := filepath.Join(os.TempDir(), "extension.zip") - defer os.Remove(filePath) + + 
// TODO: this causes the test to fail with the invalid file error + // however we need find a way to delete the temp file + // defer os.Remove(filePath) cfg := fixtureAccDeploymentWithExtensionBundle(t, "testdata/deployment_with_extension_bundle_file.tf", @@ -68,8 +71,8 @@ func TestAccDeployment_withExtension(t *testing.T) { resource.TestCheckResourceAttr(extResName, "description", "desc"), resource.TestCheckResourceAttr(extResName, "extension_type", "bundle"), resource.TestCheckResourceAttr(extResName, "file_path", filePath), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.extension.#", "1"), - resource.TestCheckTypeSetElemNestedAttrs(resName, "elasticsearch.0.extension.*", map[string]string{ + resource.TestCheckResourceAttr(resName, "elasticsearch.extension.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resName, "elasticsearch.extension.*", map[string]string{ "type": "bundle", "name": randomName, }), diff --git a/ec/acc/testdata/datasource_deployment_basic.tf b/ec/acc/testdata/datasource_deployment_basic.tf index 4692944e1..458c79725 100644 --- a/ec/acc/testdata/datasource_deployment_basic.tf +++ b/ec/acc/testdata/datasource_deployment_basic.tf @@ -9,11 +9,11 @@ resource "ec_deployment" "basic_observability" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } @@ -25,21 +25,21 @@ resource "ec_deployment" "basic_datasource" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } - kibana {} + kibana = {} - apm {} + apm = {} - enterprise_search {} + enterprise_search = {} - observability { + observability = { deployment_id = ec_deployment.basic_observability.id } diff --git 
a/ec/acc/testdata/datasource_tags.tf b/ec/acc/testdata/datasource_tags.tf index 78bf36c6b..8b88f7077 100644 --- a/ec/acc/testdata/datasource_tags.tf +++ b/ec/acc/testdata/datasource_tags.tf @@ -14,11 +14,11 @@ resource "ec_deployment" "tags" { "test_id" = "%s" } - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } diff --git a/ec/acc/testdata/deployment_autoscaling_1.tf b/ec/acc/testdata/deployment_autoscaling_1.tf index c5db2fcef..19c474d65 100644 --- a/ec/acc/testdata/deployment_autoscaling_1.tf +++ b/ec/acc/testdata/deployment_autoscaling_1.tf @@ -9,43 +9,42 @@ resource "ec_deployment" "autoscaling" { version = data.ec_stack.autoscaling.version deployment_template_id = "%s" - elasticsearch { + elasticsearch = { autoscale = "true" - topology { - id = "cold" - size = "0g" - zone_count = 1 + cold = { + size = "0g" + zone_count = 1 + autoscaling = {} } - topology { - id = "frozen" - size = "0g" - zone_count = 1 + frozen = { + size = "0g" + zone_count = 1 + autoscaling = {} } - topology { - id = "hot_content" + hot = { size = "1g" zone_count = 1 - autoscaling { + autoscaling = { max_size = "8g" } } - topology { - id = "ml" + + ml = { size = "1g" zone_count = 1 - autoscaling { + autoscaling = { min_size = "1g" max_size = "4g" } } - topology { - id = "warm" + + warm = { size = "2g" zone_count = 1 - autoscaling { + autoscaling = { max_size = "15g" } } diff --git a/ec/acc/testdata/deployment_autoscaling_2.tf b/ec/acc/testdata/deployment_autoscaling_2.tf index d3602c960..c1a077522 100644 --- a/ec/acc/testdata/deployment_autoscaling_2.tf +++ b/ec/acc/testdata/deployment_autoscaling_2.tf @@ -9,43 +9,42 @@ resource "ec_deployment" "autoscaling" { version = data.ec_stack.autoscaling.version deployment_template_id = "%s" - elasticsearch { + elasticsearch = { autoscale = "false" - topology { - id = "cold" - size = "0g" - zone_count = 1 + cold = { + size = "0g" + 
zone_count = 1 + autoscaling = {} } - topology { - id = "frozen" - size = "0g" - zone_count = 1 + frozen = { + size = "0g" + zone_count = 1 + autoscaling = {} } - topology { - id = "hot_content" + hot = { size = "1g" zone_count = 1 - autoscaling { + autoscaling = { max_size = "8g" } } - topology { - id = "ml" + + ml = { size = "0g" zone_count = 1 - autoscaling { + autoscaling = { min_size = "0g" max_size = "4g" } } - topology { - id = "warm" + + warm = { size = "2g" zone_count = 1 - autoscaling { + autoscaling = { max_size = "15g" } } diff --git a/ec/acc/testdata/deployment_basic.tf b/ec/acc/testdata/deployment_basic.tf index 5ef30a6b4..3bbe7becc 100644 --- a/ec/acc/testdata/deployment_basic.tf +++ b/ec/acc/testdata/deployment_basic.tf @@ -10,28 +10,22 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + hot = { + size = "1g" + autoscaling = {} } } - kibana { - topology { - instance_configuration_id = "%s" - } + kibana = { + instance_configuration_id = "%s" } - apm { - topology { - instance_configuration_id = "%s" - } + apm = { + instance_configuration_id = "%s" } - enterprise_search { - topology { - instance_configuration_id = "%s" - } + enterprise_search = { + instance_configuration_id = "%s" } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_basic_defaults_1.tf b/ec/acc/testdata/deployment_basic_defaults_1.tf index 35ac7d658..e057ef35c 100644 --- a/ec/acc/testdata/deployment_basic_defaults_1.tf +++ b/ec/acc/testdata/deployment_basic_defaults_1.tf @@ -9,13 +9,15 @@ resource "ec_deployment" "defaults" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} - enterprise_search { - topology { - zone_count = 1 - } + enterprise_search = { + zone_count = 1 } } \ No newline at end of file diff 
--git a/ec/acc/testdata/deployment_basic_defaults_2.tf b/ec/acc/testdata/deployment_basic_defaults_2.tf index 430c283da..559c35662 100644 --- a/ec/acc/testdata/deployment_basic_defaults_2.tf +++ b/ec/acc/testdata/deployment_basic_defaults_2.tf @@ -9,23 +9,21 @@ resource "ec_deployment" "defaults" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} - - kibana { - topology { - size = "2g" + elasticsearch = { + hot = { + autoscaling = {} } } - apm { - topology { - size = "1g" - } + kibana = { + size = "2g" } - enterprise_search { - topology { - zone_count = 1 - } + apm = { + size = "1g" + } + + enterprise_search = { + zone_count = 1 } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_basic_defaults_3.tf b/ec/acc/testdata/deployment_basic_defaults_3.tf index 77e0eb480..54a0ed51a 100644 --- a/ec/acc/testdata/deployment_basic_defaults_3.tf +++ b/ec/acc/testdata/deployment_basic_defaults_3.tf @@ -9,12 +9,12 @@ resource "ec_deployment" "defaults" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + hot = { + size = "1g" + autoscaling = {} } } - kibana {} + kibana = {} } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_basic_defaults_hw_1.tf b/ec/acc/testdata/deployment_basic_defaults_hw_1.tf index 69095efd1..997c21523 100644 --- a/ec/acc/testdata/deployment_basic_defaults_hw_1.tf +++ b/ec/acc/testdata/deployment_basic_defaults_hw_1.tf @@ -9,10 +9,10 @@ resource "ec_deployment" "defaults" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + hot = { + size = "1g" + autoscaling = {} } } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_basic_defaults_hw_2.tf b/ec/acc/testdata/deployment_basic_defaults_hw_2.tf index dba23d472..cd6aa7475 100644 --- 
a/ec/acc/testdata/deployment_basic_defaults_hw_2.tf +++ b/ec/acc/testdata/deployment_basic_defaults_hw_2.tf @@ -9,7 +9,14 @@ resource "ec_deployment" "defaults" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + warm = { + autoscaling = {} + } + } - kibana {} -} \ No newline at end of file + kibana = {} +} diff --git a/ec/acc/testdata/deployment_basic_integrations_server_1.tf b/ec/acc/testdata/deployment_basic_integrations_server_1.tf index 87ee0c877..8faee116b 100644 --- a/ec/acc/testdata/deployment_basic_integrations_server_1.tf +++ b/ec/acc/testdata/deployment_basic_integrations_server_1.tf @@ -9,10 +9,14 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} - integrations_server {} + integrations_server = {} } diff --git a/ec/acc/testdata/deployment_basic_integrations_server_2.tf b/ec/acc/testdata/deployment_basic_integrations_server_2.tf index 6c1e1d70b..936e9d875 100644 --- a/ec/acc/testdata/deployment_basic_integrations_server_2.tf +++ b/ec/acc/testdata/deployment_basic_integrations_server_2.tf @@ -9,13 +9,15 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} - integrations_server { - topology { - zone_count = 2 - } + integrations_server = { + zone_count = 2 } } diff --git a/ec/acc/testdata/deployment_basic_settings_config_1.tf b/ec/acc/testdata/deployment_basic_settings_config_1.tf index d54ed1440..daaadb7f3 100644 --- a/ec/acc/testdata/deployment_basic_settings_config_1.tf +++ b/ec/acc/testdata/deployment_basic_settings_config_1.tf @@ -9,28 +9,22 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - 
elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + hot = { + size = "1g" + autoscaling = {} } } - kibana { - topology { - instance_configuration_id = "%s" - } + kibana = { + instance_configuration_id = "%s" } - apm { - topology { - instance_configuration_id = "%s" - } + apm = { + instance_configuration_id = "%s" } - enterprise_search { - topology { - instance_configuration_id = "%s" - } + enterprise_search = { + instance_configuration_id = "%s" } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_basic_settings_config_2.tf b/ec/acc/testdata/deployment_basic_settings_config_2.tf index 9cfb4f994..eb3f46205 100644 --- a/ec/acc/testdata/deployment_basic_settings_config_2.tf +++ b/ec/acc/testdata/deployment_basic_settings_config_2.tf @@ -9,41 +9,38 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - config { + elasticsearch = { + config = { user_settings_yaml = "action.auto_create_index: true" } - topology { - id = "hot_content" - size = "1g" + hot = { + size = "1g" + autoscaling = {} } } - kibana { - config { + kibana = { + config = { user_settings_yaml = "csp.warnLegacyBrowsers: true" } - topology { - instance_configuration_id = "%s" - } + + instance_configuration_id = "%s" } - apm { - config { + apm = { + config = { debug_enabled = true user_settings_json = jsonencode({ "apm-server.rum.enabled" = true }) } - topology { - instance_configuration_id = "%s" - } + + instance_configuration_id = "%s" } - enterprise_search { - config { + enterprise_search = { + config = { user_settings_yaml = "# comment" } - topology { - instance_configuration_id = "%s" - } + + instance_configuration_id = "%s" } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_basic_settings_config_import.tf b/ec/acc/testdata/deployment_basic_settings_config_import.tf new file mode 100644 index 000000000..8209f974b --- /dev/null +++ 
b/ec/acc/testdata/deployment_basic_settings_config_import.tf @@ -0,0 +1,56 @@ +data "ec_stack" "latest" { + version_regex = "latest" + region = "%s" +} + +resource "ec_deployment" "basic" { + name = "%s" + region = "%s" + version = data.ec_stack.latest.version + deployment_template_id = "%s" + + elasticsearch = { + hot = { + size = "1g" + autoscaling = {} + } + + warm = { + autoscaling = {} + } + + cold = { + autoscaling = {} + } + + frozen = { + autoscaling = {} + } + + ml = { + autoscaling = {} + } + + master = { + autoscaling = {} + } + + coordinating = { + autoscaling = {} + } + + config = {} + } + + kibana = { + instance_configuration_id = "%s" + } + + apm = { + instance_configuration_id = "%s" + } + + enterprise_search = { + instance_configuration_id = "%s" + } +} \ No newline at end of file diff --git a/ec/acc/testdata/deployment_basic_tags_1.tf b/ec/acc/testdata/deployment_basic_tags_1.tf index 46e8e19a8..a6cba36f1 100644 --- a/ec/acc/testdata/deployment_basic_tags_1.tf +++ b/ec/acc/testdata/deployment_basic_tags_1.tf @@ -9,10 +9,10 @@ resource "ec_deployment" "tags" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + hot = { + size = "2g" + autoscaling = {} } } diff --git a/ec/acc/testdata/deployment_basic_tags_2.tf b/ec/acc/testdata/deployment_basic_tags_2.tf index 48b95175b..5569f971b 100644 --- a/ec/acc/testdata/deployment_basic_tags_2.tf +++ b/ec/acc/testdata/deployment_basic_tags_2.tf @@ -9,10 +9,10 @@ resource "ec_deployment" "tags" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + hot = { + size = "2g" + autoscaling = {} } } diff --git a/ec/acc/testdata/deployment_basic_tags_3.tf b/ec/acc/testdata/deployment_basic_tags_3.tf index f46b5a7cd..b8af88b40 100644 --- a/ec/acc/testdata/deployment_basic_tags_3.tf +++ 
b/ec/acc/testdata/deployment_basic_tags_3.tf @@ -9,10 +9,10 @@ resource "ec_deployment" "tags" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + hot = { + size = "2g" + autoscaling = {} } } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_basic_tags_4.tf b/ec/acc/testdata/deployment_basic_tags_4.tf index 9edfd4503..202dddb0e 100644 --- a/ec/acc/testdata/deployment_basic_tags_4.tf +++ b/ec/acc/testdata/deployment_basic_tags_4.tf @@ -9,10 +9,10 @@ resource "ec_deployment" "tags" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + hot = { + size = "2g" + autoscaling = {} } } diff --git a/ec/acc/testdata/deployment_basic_with_traffic_filter_2.tf b/ec/acc/testdata/deployment_basic_with_traffic_filter_2.tf index b397de690..04c4bf903 100644 --- a/ec/acc/testdata/deployment_basic_with_traffic_filter_2.tf +++ b/ec/acc/testdata/deployment_basic_with_traffic_filter_2.tf @@ -9,18 +9,18 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + hot = { + size = "1g" + autoscaling = {} } } - kibana {} + kibana = {} - apm {} + apm = {} - enterprise_search {} + enterprise_search = {} traffic_filter = [ ec_deployment_traffic_filter.default.id, diff --git a/ec/acc/testdata/deployment_basic_with_traffic_filter_3.tf b/ec/acc/testdata/deployment_basic_with_traffic_filter_3.tf index 1e975039e..85026ccd3 100644 --- a/ec/acc/testdata/deployment_basic_with_traffic_filter_3.tf +++ b/ec/acc/testdata/deployment_basic_with_traffic_filter_3.tf @@ -9,18 +9,18 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" 
+ elasticsearch = { + hot = { + size = "1g" + autoscaling = {} } } - kibana {} + kibana = {} - apm {} + apm = {} - enterprise_search {} + enterprise_search = {} traffic_filter = [ ec_deployment_traffic_filter.second.id, diff --git a/ec/acc/testdata/deployment_ccs_1.tf b/ec/acc/testdata/deployment_ccs_1.tf index bfa969f70..9a0166aad 100644 --- a/ec/acc/testdata/deployment_ccs_1.tf +++ b/ec/acc/testdata/deployment_ccs_1.tf @@ -9,14 +9,17 @@ resource "ec_deployment" "ccs" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - dynamic "remote_cluster" { - for_each = ec_deployment.source_ccs - content { - deployment_id = remote_cluster.value.id - alias = remote_cluster.value.name - } + elasticsearch = { + hot = { + autoscaling = {} } + + "remote_cluster" = [for source_css in ec_deployment.source_ccs : + { + deployment_id = source_css.id + alias = source_css.name + } + ] } } @@ -27,11 +30,11 @@ resource "ec_deployment" "source_ccs" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - zone_count = 1 - size = "1g" + elasticsearch = { + hot = { + zone_count = 1 + size = "1g" + autoscaling = {} } } } diff --git a/ec/acc/testdata/deployment_ccs_2.tf b/ec/acc/testdata/deployment_ccs_2.tf index 7d385694d..3a76e7c5a 100644 --- a/ec/acc/testdata/deployment_ccs_2.tf +++ b/ec/acc/testdata/deployment_ccs_2.tf @@ -9,12 +9,12 @@ resource "ec_deployment" "ccs" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + hot = { + size = "2g" + autoscaling = {} } } - kibana {} + kibana = {} } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_compute_optimized_1.tf b/ec/acc/testdata/deployment_compute_optimized_1.tf index 0512241d1..6826c5d2a 100644 --- a/ec/acc/testdata/deployment_compute_optimized_1.tf +++ b/ec/acc/testdata/deployment_compute_optimized_1.tf @@ -9,7 
+9,11 @@ resource "ec_deployment" "compute_optimized" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_compute_optimized_2.tf b/ec/acc/testdata/deployment_compute_optimized_2.tf index ab5c27138..f6f26f1c0 100644 --- a/ec/acc/testdata/deployment_compute_optimized_2.tf +++ b/ec/acc/testdata/deployment_compute_optimized_2.tf @@ -9,14 +9,14 @@ resource "ec_deployment" "compute_optimized" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + hot = { + size = "2g" + autoscaling = {} } } - kibana {} + kibana = {} - apm {} + apm = {} } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_dedicated_coordinating.tf b/ec/acc/testdata/deployment_dedicated_coordinating.tf index 4f47a80d8..07a500b1f 100644 --- a/ec/acc/testdata/deployment_dedicated_coordinating.tf +++ b/ec/acc/testdata/deployment_dedicated_coordinating.tf @@ -9,21 +9,23 @@ resource "ec_deployment" "dedicated_coordinating" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "coordinating" - zone_count = 2 - size = "1g" + elasticsearch = { + coordinating = { + zone_count = 2 + size = "1g" + autoscaling = {} } - topology { - id = "hot_content" - zone_count = 1 - size = "1g" + + hot = { + zone_count = 1 + size = "1g" + autoscaling = {} } - topology { - id = "warm" - zone_count = 1 - size = "2g" + + warm = { + zone_count = 1 + size = "2g" + autoscaling = {} } } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_dedicated_master.tf b/ec/acc/testdata/deployment_dedicated_master.tf index 4ec815812..d1ed6e9eb 100644 --- a/ec/acc/testdata/deployment_dedicated_master.tf +++ b/ec/acc/testdata/deployment_dedicated_master.tf @@ -9,26 +9,29 @@ 
resource "ec_deployment" "dedicated_master" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "cold" - zone_count = 1 - size = "2g" + elasticsearch = { + cold = { + zone_count = 1 + size = "2g" + autoscaling = {} } - topology { - id = "hot_content" - zone_count = 3 - size = "1g" + + hot = { + zone_count = 3 + size = "1g" + autoscaling = {} } - topology { - id = "master" - zone_count = 3 - size = "1g" + + master = { + zone_count = 3 + size = "1g" + autoscaling = {} } - topology { - id = "warm" - zone_count = 2 - size = "2g" + + warm = { + zone_count = 2 + size = "2g" + autoscaling = {} } } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_docker_image_override.tf b/ec/acc/testdata/deployment_docker_image_override.tf index c0ad75f96..d4712df1e 100644 --- a/ec/acc/testdata/deployment_docker_image_override.tf +++ b/ec/acc/testdata/deployment_docker_image_override.tf @@ -15,35 +15,35 @@ resource "ec_deployment" "docker_image" { version = data.ec_stack.latest.version deployment_template_id = local.deployment_template - elasticsearch { - config { + elasticsearch = { + config = { docker_image = "docker.elastic.co/cloud-ci/elasticsearch:7.15.0-SNAPSHOT" } - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } - kibana { - config { + kibana = { + config = { docker_image = "docker.elastic.co/cloud-ci/kibana:7.15.0-SNAPSHOT" } } - apm { - config { + apm = { + config = { docker_image = "docker.elastic.co/cloud-ci/apm:7.15.0-SNAPSHOT" } } - enterprise_search { - config { + enterprise_search = { + config = { docker_image = "docker.elastic.co/cloud-ci/enterprise-search:7.15.0-SNAPSHOT" } - topology { - zone_count = 1 - } + + zone_count = 1 } } diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_1.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_1.tf index 094e23a3b..efab4a609 100644 --- 
a/ec/acc/testdata/deployment_elasticsearch_keystore_1.tf +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_1.tf @@ -9,11 +9,11 @@ resource "ec_deployment" "keystore" { version = data.ec_stack.keystore.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_1_041.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_1_041.tf new file mode 100644 index 000000000..094e23a3b --- /dev/null +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_1_041.tf @@ -0,0 +1,32 @@ +data "ec_stack" "keystore" { + version_regex = "latest" + region = "%s" +} + +resource "ec_deployment" "keystore" { + name = "%s" + region = "%s" + version = data.ec_stack.keystore.version + deployment_template_id = "%s" + + elasticsearch { + topology { + id = "hot_content" + size = "1g" + zone_count = 1 + } + } +} + +resource "ec_deployment_elasticsearch_keystore" "test" { + deployment_id = ec_deployment.keystore.id + setting_name = "xpack.notification.slack.account.hello.secure_url" + value = "hella" +} + +resource "ec_deployment_elasticsearch_keystore" "gcs_creds" { + deployment_id = ec_deployment.keystore.id + setting_name = "gcs.client.secondary.credentials_file" + value = file("testdata/deployment_elasticsearch_keystore_creds.json") +} + diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_1_migrated.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_1_migrated.tf new file mode 100644 index 000000000..efab4a609 --- /dev/null +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_1_migrated.tf @@ -0,0 +1,32 @@ +data "ec_stack" "keystore" { + version_regex = "latest" + region = "%s" +} + +resource "ec_deployment" "keystore" { + name = "%s" + region = "%s" + version = data.ec_stack.keystore.version + deployment_template_id = "%s" + + elasticsearch = { + hot = { + size 
= "1g" + zone_count = 1 + autoscaling = {} + } + } +} + +resource "ec_deployment_elasticsearch_keystore" "test" { + deployment_id = ec_deployment.keystore.id + setting_name = "xpack.notification.slack.account.hello.secure_url" + value = "hella" +} + +resource "ec_deployment_elasticsearch_keystore" "gcs_creds" { + deployment_id = ec_deployment.keystore.id + setting_name = "gcs.client.secondary.credentials_file" + value = file("testdata/deployment_elasticsearch_keystore_creds.json") +} + diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_2.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_2.tf index 4a04a7c2f..1dfd64808 100644 --- a/ec/acc/testdata/deployment_elasticsearch_keystore_2.tf +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_2.tf @@ -9,11 +9,11 @@ resource "ec_deployment" "keystore" { version = data.ec_stack.keystore.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_3.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_3.tf index 572e49b21..a20a03a8f 100644 --- a/ec/acc/testdata/deployment_elasticsearch_keystore_3.tf +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_3.tf @@ -9,11 +9,11 @@ resource "ec_deployment" "keystore" { version = data.ec_stack.keystore.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_4.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_4.tf index 85c3f2fb4..09d7564eb 100644 --- a/ec/acc/testdata/deployment_elasticsearch_keystore_4.tf +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_4.tf @@ -9,11 +9,11 @@ resource "ec_deployment" "keystore" { version = 
data.ec_stack.keystore.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } diff --git a/ec/acc/testdata/deployment_emptyconfig.tf b/ec/acc/testdata/deployment_emptyconfig.tf index c51f87ca8..b8e1122b6 100644 --- a/ec/acc/testdata/deployment_emptyconfig.tf +++ b/ec/acc/testdata/deployment_emptyconfig.tf @@ -9,14 +9,14 @@ resource "ec_deployment" "emptyconfig" { version = data.ec_stack.emptyconfig.version deployment_template_id = "%s" - elasticsearch { - config { + elasticsearch = { + config = { user_settings_yaml = null } - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_enterprise_search_1.tf b/ec/acc/testdata/deployment_enterprise_search_1.tf index 1490e541a..91ea5a30d 100644 --- a/ec/acc/testdata/deployment_enterprise_search_1.tf +++ b/ec/acc/testdata/deployment_enterprise_search_1.tf @@ -9,9 +9,13 @@ resource "ec_deployment" "enterprise_search" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} - enterprise_search {} + enterprise_search = {} } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_enterprise_search_2.tf b/ec/acc/testdata/deployment_enterprise_search_2.tf index 29eeb4f24..fb610a94e 100644 --- a/ec/acc/testdata/deployment_enterprise_search_2.tf +++ b/ec/acc/testdata/deployment_enterprise_search_2.tf @@ -9,14 +9,14 @@ resource "ec_deployment" "enterprise_search" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + hot = { + size = "2g" + autoscaling = {} } } - kibana {} + kibana = {} - enterprise_search {} + 
enterprise_search = {} } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_hotwarm_1.tf b/ec/acc/testdata/deployment_hotwarm_1.tf index 8e693f913..ac5502b1e 100644 --- a/ec/acc/testdata/deployment_hotwarm_1.tf +++ b/ec/acc/testdata/deployment_hotwarm_1.tf @@ -9,5 +9,13 @@ resource "ec_deployment" "hotwarm" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + + warm = { + autoscaling = {} + } + } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_hotwarm_2.tf b/ec/acc/testdata/deployment_hotwarm_2.tf index 03ca44de1..489a5d10a 100644 --- a/ec/acc/testdata/deployment_hotwarm_2.tf +++ b/ec/acc/testdata/deployment_hotwarm_2.tf @@ -9,16 +9,18 @@ resource "ec_deployment" "hotwarm" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - zone_count = 1 - size = "1g" + elasticsearch = { + hot = { + zone_count = 1 + size = "1g" + autoscaling = {} } - topology { - id = "warm" - zone_count = 1 - size = "2g" + + warm = { + zone_count = 1 + size = "2g" + autoscaling = {} } + } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_memory_optimized_1.tf b/ec/acc/testdata/deployment_memory_optimized_1.tf index dd7421c27..57848c402 100644 --- a/ec/acc/testdata/deployment_memory_optimized_1.tf +++ b/ec/acc/testdata/deployment_memory_optimized_1.tf @@ -9,7 +9,11 @@ resource "ec_deployment" "memory_optimized" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_memory_optimized_2.tf b/ec/acc/testdata/deployment_memory_optimized_2.tf index ac762c066..a0a5f5e4e 100644 --- a/ec/acc/testdata/deployment_memory_optimized_2.tf +++ b/ec/acc/testdata/deployment_memory_optimized_2.tf @@ -9,14 +9,14 
@@ resource "ec_deployment" "memory_optimized" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + hot = { + size = "2g" + autoscaling = {} } } - kibana {} + kibana = {} - apm {} + apm = {} } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_observability_1.tf b/ec/acc/testdata/deployment_observability_1.tf index 6811f6eb0..6ff24f65b 100644 --- a/ec/acc/testdata/deployment_observability_1.tf +++ b/ec/acc/testdata/deployment_observability_1.tf @@ -9,11 +9,11 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } @@ -24,15 +24,15 @@ resource "ec_deployment" "observability" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } - observability { + observability = { deployment_id = ec_deployment.basic.id } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_observability_2.tf b/ec/acc/testdata/deployment_observability_2.tf index 709a77949..dd626cc62 100644 --- a/ec/acc/testdata/deployment_observability_2.tf +++ b/ec/acc/testdata/deployment_observability_2.tf @@ -9,11 +9,11 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } @@ -24,15 +24,15 @@ resource "ec_deployment" "observability" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = 
"1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } - observability { + observability = { deployment_id = ec_deployment.basic.id metrics = false } diff --git a/ec/acc/testdata/deployment_observability_3.tf b/ec/acc/testdata/deployment_observability_3.tf index 976d6a545..0587d25ca 100644 --- a/ec/acc/testdata/deployment_observability_3.tf +++ b/ec/acc/testdata/deployment_observability_3.tf @@ -9,11 +9,11 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } @@ -24,15 +24,15 @@ resource "ec_deployment" "observability" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } - observability { + observability = { deployment_id = ec_deployment.basic.id logs = false } diff --git a/ec/acc/testdata/deployment_observability_4.tf b/ec/acc/testdata/deployment_observability_4.tf index 7ee57ac9e..411be808e 100644 --- a/ec/acc/testdata/deployment_observability_4.tf +++ b/ec/acc/testdata/deployment_observability_4.tf @@ -9,11 +9,11 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } @@ -24,11 +24,11 @@ resource "ec_deployment" "observability" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } \ No newline at end of file diff 
--git a/ec/acc/testdata/deployment_observability_self.tf b/ec/acc/testdata/deployment_observability_self.tf index 7ab3d8df4..1c8523027 100644 --- a/ec/acc/testdata/deployment_observability_self.tf +++ b/ec/acc/testdata/deployment_observability_self.tf @@ -9,24 +9,22 @@ resource "ec_deployment" "observability" { version = data.ec_stack.latest.version deployment_template_id = "%s" - observability { + observability = { deployment_id = "self" } - elasticsearch { + elasticsearch = { autoscale = "false" - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } - kibana { - topology { - size = "1g" - zone_count = 1 - } + kibana = { + size = "1g" + zone_count = 1 } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_observability_tpl_1.tf b/ec/acc/testdata/deployment_observability_tpl_1.tf index 4b475d259..b58f1ecd9 100644 --- a/ec/acc/testdata/deployment_observability_tpl_1.tf +++ b/ec/acc/testdata/deployment_observability_tpl_1.tf @@ -9,9 +9,13 @@ resource "ec_deployment" "observability_tpl" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} - apm {} + apm = {} } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_observability_tpl_2.tf b/ec/acc/testdata/deployment_observability_tpl_2.tf index a3a87acbf..896393889 100644 --- a/ec/acc/testdata/deployment_observability_tpl_2.tf +++ b/ec/acc/testdata/deployment_observability_tpl_2.tf @@ -9,14 +9,14 @@ resource "ec_deployment" "observability_tpl" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + hot = { + size = "2g" + autoscaling = {} } } - kibana {} + kibana = {} - apm {} + apm = {} } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_post_node_roles_upgrade_1.tf 
b/ec/acc/testdata/deployment_post_node_roles_upgrade_1.tf index 2b99eff5f..68fe0810d 100644 --- a/ec/acc/testdata/deployment_post_node_roles_upgrade_1.tf +++ b/ec/acc/testdata/deployment_post_node_roles_upgrade_1.tf @@ -9,11 +9,11 @@ resource "ec_deployment" "post_nr_upgrade" { version = data.ec_stack.post_node_roles_upgrade.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_post_node_roles_upgrade_2.tf b/ec/acc/testdata/deployment_post_node_roles_upgrade_2.tf index 98a710d17..a28f001f2 100644 --- a/ec/acc/testdata/deployment_post_node_roles_upgrade_2.tf +++ b/ec/acc/testdata/deployment_post_node_roles_upgrade_2.tf @@ -9,11 +9,11 @@ resource "ec_deployment" "post_nr_upgrade" { version = data.ec_stack.post_node_roles_upgrade.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf b/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf index 87619cb96..46f7c995f 100644 --- a/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf +++ b/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf @@ -9,11 +9,11 @@ resource "ec_deployment" "pre_nr" { version = data.ec_stack.pre_node_roles.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf b/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf index 2d96cbf1f..ede18f462 100644 --- 
a/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf +++ b/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf @@ -9,11 +9,11 @@ resource "ec_deployment" "pre_nr" { version = data.ec_stack.pre_node_roles.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf b/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf index a8729b10e..4cbdf674d 100644 --- a/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf +++ b/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf @@ -9,16 +9,18 @@ resource "ec_deployment" "pre_nr" { version = data.ec_stack.pre_node_roles.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } - topology { - id = "warm" - size = "2g" - zone_count = 1 + + warm = { + size = "2g" + zone_count = 1 + autoscaling = {} } + } } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_security_1.tf b/ec/acc/testdata/deployment_security_1.tf index 89140e5f6..235966c38 100644 --- a/ec/acc/testdata/deployment_security_1.tf +++ b/ec/acc/testdata/deployment_security_1.tf @@ -9,7 +9,11 @@ resource "ec_deployment" "security" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_security_2.tf b/ec/acc/testdata/deployment_security_2.tf index 6fa8777df..25c4f4e86 100644 --- a/ec/acc/testdata/deployment_security_2.tf +++ b/ec/acc/testdata/deployment_security_2.tf @@ -9,14 +9,14 @@ resource "ec_deployment" "security" { version = data.ec_stack.latest.version 
deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + hot = { + size = "2g" + autoscaling = {} } } - kibana {} + kibana = {} - apm {} + apm = {} } \ No newline at end of file diff --git a/ec/acc/testdata/deployment_snapshot_1.tf b/ec/acc/testdata/deployment_snapshot_1.tf index 62500342c..630dc9ea7 100644 --- a/ec/acc/testdata/deployment_snapshot_1.tf +++ b/ec/acc/testdata/deployment_snapshot_1.tf @@ -14,10 +14,10 @@ resource "ec_deployment" "snapshot_source" { version = data.ec_stack.latest.version deployment_template_id = local.deployment_template - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + hot = { + size = "1g" + autoscaling = {} } } } diff --git a/ec/acc/testdata/deployment_snapshot_2.tf b/ec/acc/testdata/deployment_snapshot_2.tf index 08c374005..712cd3716 100644 --- a/ec/acc/testdata/deployment_snapshot_2.tf +++ b/ec/acc/testdata/deployment_snapshot_2.tf @@ -14,10 +14,10 @@ resource "ec_deployment" "snapshot_source" { version = data.ec_stack.latest.version deployment_template_id = local.deployment_template - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + hot = { + size = "1g" + autoscaling = {} } } } @@ -28,14 +28,15 @@ resource "ec_deployment" "snapshot_target" { version = data.ec_stack.latest.version deployment_template_id = local.deployment_template - elasticsearch { - snapshot_source { + elasticsearch = { + + snapshot_source = [{ source_elasticsearch_cluster_id = ec_deployment.snapshot_source.elasticsearch.0.resource_id - } + }] - topology { - id = "hot_content" - size = "1g" + hot = { + size = "1g" + autoscaling = {} } } } diff --git a/ec/acc/testdata/deployment_traffic_filter_association_basic.tf b/ec/acc/testdata/deployment_traffic_filter_association_basic.tf index 5a5186876..c41cb7f6a 100644 --- a/ec/acc/testdata/deployment_traffic_filter_association_basic.tf +++ 
b/ec/acc/testdata/deployment_traffic_filter_association_basic.tf @@ -9,14 +9,14 @@ resource "ec_deployment" "tf_assoc" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + hot = { + size = "1g" + autoscaling = {} } } - kibana {} + kibana = {} } resource "ec_deployment_traffic_filter" "tf_assoc" { diff --git a/ec/acc/testdata/deployment_traffic_filter_association_basic_041.tf b/ec/acc/testdata/deployment_traffic_filter_association_basic_041.tf new file mode 100644 index 000000000..5a5186876 --- /dev/null +++ b/ec/acc/testdata/deployment_traffic_filter_association_basic_041.tf @@ -0,0 +1,35 @@ +data "ec_stack" "latest" { + version_regex = "latest" + region = "%s" +} + +resource "ec_deployment" "tf_assoc" { + name = "%s" + region = "%s" + version = data.ec_stack.latest.version + deployment_template_id = "%s" + + elasticsearch { + topology { + id = "hot_content" + size = "1g" + } + } + + kibana {} +} + +resource "ec_deployment_traffic_filter" "tf_assoc" { + name = "%s" + region = "%s" + type = "ip" + + rule { + source = "0.0.0.0/0" + } +} + +resource "ec_deployment_traffic_filter_association" "tf_assoc" { + traffic_filter_id = ec_deployment_traffic_filter.tf_assoc.id + deployment_id = ec_deployment.tf_assoc.id +} diff --git a/ec/acc/testdata/deployment_traffic_filter_association_basic_ignore_changes.tf b/ec/acc/testdata/deployment_traffic_filter_association_basic_ignore_changes.tf index ce1bce981..6aeea7873 100644 --- a/ec/acc/testdata/deployment_traffic_filter_association_basic_ignore_changes.tf +++ b/ec/acc/testdata/deployment_traffic_filter_association_basic_ignore_changes.tf @@ -9,14 +9,14 @@ resource "ec_deployment" "tf_assoc" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + hot = { + size = "1g" + autoscaling = {} } } - kibana {} + kibana = {} 
lifecycle { ignore_changes = [traffic_filter] diff --git a/ec/acc/testdata/deployment_traffic_filter_association_basic_update.tf b/ec/acc/testdata/deployment_traffic_filter_association_basic_update.tf index 212ff8d66..a756bafe6 100644 --- a/ec/acc/testdata/deployment_traffic_filter_association_basic_update.tf +++ b/ec/acc/testdata/deployment_traffic_filter_association_basic_update.tf @@ -9,14 +9,14 @@ resource "ec_deployment" "tf_assoc" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + hot = { + size = "1g" + autoscaling = {} } } - kibana {} + kibana = {} } resource "ec_deployment_traffic_filter" "tf_assoc_second" { diff --git a/ec/acc/testdata/deployment_upgrade_retry_1.tf b/ec/acc/testdata/deployment_upgrade_retry_1.tf index bde4282b4..58a9e01ad 100644 --- a/ec/acc/testdata/deployment_upgrade_retry_1.tf +++ b/ec/acc/testdata/deployment_upgrade_retry_1.tf @@ -14,13 +14,13 @@ resource "ec_deployment" "upgrade_retry" { version = data.ec_stack.latest.version deployment_template_id = local.deployment_template - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } - kibana {} + kibana = {} } diff --git a/ec/acc/testdata/deployment_upgrade_retry_2.tf b/ec/acc/testdata/deployment_upgrade_retry_2.tf index d45d1ce9e..ae13523e3 100644 --- a/ec/acc/testdata/deployment_upgrade_retry_2.tf +++ b/ec/acc/testdata/deployment_upgrade_retry_2.tf @@ -14,13 +14,13 @@ resource "ec_deployment" "upgrade_retry" { version = data.ec_stack.latest.version deployment_template_id = local.deployment_template - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + hot = { + size = "1g" + zone_count = 1 + autoscaling = {} } } - kibana {} + kibana = {} } diff --git a/ec/acc/testdata/deployment_with_extension_bundle_file.tf 
b/ec/acc/testdata/deployment_with_extension_bundle_file.tf index e44aa9d06..7cb1fef0b 100644 --- a/ec/acc/testdata/deployment_with_extension_bundle_file.tf +++ b/ec/acc/testdata/deployment_with_extension_bundle_file.tf @@ -6,7 +6,6 @@ locals { file_path = "%s" } - data "ec_stack" "latest" { version_regex = "latest" region = local.region @@ -18,13 +17,16 @@ resource "ec_deployment" "with_extension" { version = data.ec_stack.latest.version deployment_template_id = local.deployment_template - elasticsearch { - extension { + elasticsearch = { + hot = { + autoscaling = {} + } + extension = [{ type = "bundle" name = local.name version = data.ec_stack.latest.version url = ec_deployment_extension.my_extension.url - } + }] } } diff --git a/ec/ecdatasource/deploymentdatasource/datasource.go b/ec/ecdatasource/deploymentdatasource/datasource.go index 857d6cff7..d4768a708 100644 --- a/ec/ecdatasource/deploymentdatasource/datasource.go +++ b/ec/ecdatasource/deploymentdatasource/datasource.go @@ -31,7 +31,7 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/terraform-provider-ec/ec/internal" - "github.com/elastic/terraform-provider-ec/ec/internal/flatteners" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) @@ -129,7 +129,7 @@ func modelToState(ctx context.Context, res *models.DeploymentGetResponse, state diags.Append(flattenEnterpriseSearchResources(ctx, res.Resources.EnterpriseSearch, &state.EnterpriseSearch)...) if res.Metadata != nil { - state.Tags = flatteners.FlattenTags(res.Metadata.Tags) + state.Tags = converters.TagsToTypeMap(res.Metadata.Tags) } return diags diff --git a/ec/ecdatasource/privatelinkdatasource/aws_datasource.go b/ec/ecdatasource/privatelinkdatasource/aws_datasource.go new file mode 100644 index 000000000..8f48902c1 --- /dev/null +++ b/ec/ecdatasource/privatelinkdatasource/aws_datasource.go @@ -0,0 +1,82 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package privatelinkdatasource + +import ( + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// AwsDataSource returns the ec_aws_privatelink_endpoint data source schema. +func AwsDataSource() *schema.Resource { + return &schema.Resource{ + ReadContext: readContextFor(provider{ + name: "aws", + populateResource: populateAwsResource, + }), + + Schema: newAwsSchema(), + + Timeouts: &schema.ResourceTimeout{ + Default: schema.DefaultTimeout(5 * time.Minute), + }, + } +} + +func newAwsSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Required: true, + }, + + // Computed + "vpc_service_name": { + Type: schema.TypeString, + Computed: true, + }, + "domain_name": { + Type: schema.TypeString, + Computed: true, + }, + "zone_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + } +} + +func populateAwsResource(regionData map[string]interface{}, d *schema.ResourceData) error { + if err := copyToStateAs[string]("vpc_service_name", regionData, d); err != nil { + return err + } + + if err := copyToStateAs[string]("domain_name", regionData, d); err != nil { + return err + } 
+ + if err := copyToStateAs[[]interface{}]("zone_ids", regionData, d); err != nil { + return err + } + + return nil +} diff --git a/ec/ecdatasource/privatelinkdatasource/aws_datasource_test.go b/ec/ecdatasource/privatelinkdatasource/aws_datasource_test.go new file mode 100644 index 000000000..f5ef2d83b --- /dev/null +++ b/ec/ecdatasource/privatelinkdatasource/aws_datasource_test.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package privatelinkdatasource + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" +) + +func Test_AwsDataSource_ReadContext(t *testing.T) { + tests := []struct { + name string + region string + diag diag.Diagnostics + endpoint *schema.ResourceData + }{ + { + name: "invalid region returns unknown regino error", + region: "unknown", + diag: diag.FromErr(fmt.Errorf("%w: unknown", errUnknownRegion)), + endpoint: util.NewResourceData(t, util.ResDataParams{ + ID: "myID", + State: map[string]interface{}{ + "id": "myID", + "region": "unknown", + }, + Schema: newAwsSchema(), + }), + }, + { + name: "valid region returns endpoint", + region: "ap-northeast-1", + endpoint: util.NewResourceData(t, util.ResDataParams{ + ID: "myID", + State: map[string]interface{}{ + "id": "myID", + "region": "ap-northeast-1", + "vpc_service_name": "com.amazonaws.vpce.ap-northeast-1.vpce-svc-0e1046d7b48d5cf5f", + "domain_name": "vpce.ap-northeast-1.aws.elastic-cloud.com", + "zone_ids": []interface{}{"apne1-az1", "apne1-az2", "apne1-az4"}, + }, + Schema: newAwsSchema(), + }), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + deploymentsSchemaArg := schema.TestResourceDataRaw(t, newAwsSchema(), nil) + deploymentsSchemaArg.SetId("myID") + _ = deploymentsSchemaArg.Set("region", tt.region) + + source := AwsDataSource() + + d := source.ReadContext(context.Background(), deploymentsSchemaArg, nil) + if tt.diag != nil { + assert.Equal(t, d, tt.diag) + } else { + assert.Nil(t, d) + } + + assert.Equal(t, tt.endpoint.State().Attributes, deploymentsSchemaArg.State().Attributes) + }) + } +} diff --git a/ec/ecdatasource/privatelinkdatasource/azure_datasource.go b/ec/ecdatasource/privatelinkdatasource/azure_datasource.go new file mode 100644 index 000000000..8aa733567 
--- /dev/null +++ b/ec/ecdatasource/privatelinkdatasource/azure_datasource.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package privatelinkdatasource + +import ( + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// AzureDataSource returns the ec_gcp_privateserviceconnect_endpoint data source schema. 
+func AzureDataSource() *schema.Resource { + return &schema.Resource{ + ReadContext: readContextFor(provider{ + name: "azure", + populateResource: populateAzureResource, + }), + + Schema: newAzureSchema(), + + Timeouts: &schema.ResourceTimeout{ + Default: schema.DefaultTimeout(5 * time.Minute), + }, + } +} + +func newAzureSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Required: true, + }, + + // Computed + "service_alias": { + Type: schema.TypeString, + Computed: true, + }, + "domain_name": { + Type: schema.TypeString, + Computed: true, + }, + } +} + +func populateAzureResource(regionData map[string]interface{}, d *schema.ResourceData) error { + if err := copyToStateAs[string]("service_alias", regionData, d); err != nil { + return err + } + + if err := copyToStateAs[string]("domain_name", regionData, d); err != nil { + return err + } + + return nil +} diff --git a/ec/ecdatasource/privatelinkdatasource/azure_datasource_test.go b/ec/ecdatasource/privatelinkdatasource/azure_datasource_test.go new file mode 100644 index 000000000..0e32269b2 --- /dev/null +++ b/ec/ecdatasource/privatelinkdatasource/azure_datasource_test.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package privatelinkdatasource + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" +) + +func Test_AzureDataSource_ReadContext(t *testing.T) { + tests := []struct { + name string + region string + diag diag.Diagnostics + endpoint *schema.ResourceData + }{ + { + name: "invalid region returns unknown regino error", + region: "unknown", + diag: diag.FromErr(fmt.Errorf("%w: unknown", errUnknownRegion)), + endpoint: util.NewResourceData(t, util.ResDataParams{ + ID: "myID", + State: map[string]interface{}{ + "id": "myID", + "region": "unknown", + }, + Schema: newAzureSchema(), + }), + }, + { + name: "valid region returns endpoint", + region: "uksouth", + endpoint: util.NewResourceData(t, util.ResDataParams{ + ID: "myID", + State: map[string]interface{}{ + "id": "myID", + "region": "uksouth", + "service_alias": "uksouth-prod-007-privatelink-service.98758729-06f7-438d-baaa-0cb63e737cdf.uksouth.azure.privatelinkservice", + "domain_name": "privatelink.uksouth.azure.elastic-cloud.com", + }, + Schema: newAzureSchema(), + }), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + deploymentsSchemaArg := schema.TestResourceDataRaw(t, newAzureSchema(), nil) + deploymentsSchemaArg.SetId("myID") + _ = deploymentsSchemaArg.Set("region", tt.region) + + source := AzureDataSource() + + d := source.ReadContext(context.Background(), deploymentsSchemaArg, nil) + if tt.diag != nil { + assert.Equal(t, d, tt.diag) + } else { + assert.Nil(t, d) + } + + assert.Equal(t, tt.endpoint.State().Attributes, deploymentsSchemaArg.State().Attributes) + }) + } +} diff --git a/ec/ecdatasource/privatelinkdatasource/datasource.go 
b/ec/ecdatasource/privatelinkdatasource/datasource.go new file mode 100644 index 000000000..0d1425a9e --- /dev/null +++ b/ec/ecdatasource/privatelinkdatasource/datasource.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package privatelinkdatasource + +import ( + "context" + _ "embed" + "encoding/json" + "errors" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +//go:embed regionPrivateLinkMap.json +var privateLinkDataJson string + +type provider struct { + name string + populateResource func(map[string]interface{}, *schema.ResourceData) error +} + +var ( + errUnknownRegion = errors.New("could not find a privatelink endpoint for region") + errUnknownProvider = errors.New("could not find a privatelink endpoint map for provider") + errMissingKey = errors.New("expected region data key not available") + errWrongType = errors.New("unexapected type in region data key") +) + +func readContextFor(p provider) func(context.Context, *schema.ResourceData, interface{}) diag.Diagnostics { + return func(ctx context.Context, rd *schema.ResourceData, i interface{}) diag.Diagnostics { + regionName, ok := 
rd.Get("region").(string) + if !ok { + return diag.Errorf("a region is required to lookup a privatelink endpoint") + } + + if rd.Id() == "" { + rd.SetId(strconv.Itoa(schema.HashString(fmt.Sprintf("%s:%s", p.name, regionName)))) + } + + regionData, err := getRegionData(p.name, regionName) + if err != nil { + return diag.FromErr(err) + } + + return diag.FromErr(p.populateResource(regionData, rd)) + } +} + +type configMap = map[string]interface{} +type regionToConfigMap = map[string]configMap +type providerToRegionMap = map[string]regionToConfigMap + +func getRegionData(providerName string, regionName string) (map[string]interface{}, error) { + var providerMap providerToRegionMap + if err := json.Unmarshal([]byte(privateLinkDataJson), &providerMap); err != nil { + return nil, err + } + + providerData, ok := providerMap[providerName] + if !ok { + return nil, fmt.Errorf("%w: %s", errUnknownProvider, providerName) + } + + regionData, ok := providerData[regionName] + if !ok { + return nil, fmt.Errorf("%w: %s", errUnknownRegion, regionName) + } + + return regionData, nil +} + +func copyToStateAs[T any](key string, from map[string]interface{}, rd *schema.ResourceData) error { + value, ok := from[key] + if !ok { + return fmt.Errorf("%w: %s", errMissingKey, key) + } + + castValue, ok := value.(T) + if !ok { + return fmt.Errorf("%w: %s", errWrongType, key) + } + + return rd.Set(key, castValue) +} diff --git a/ec/ecdatasource/privatelinkdatasource/gcp_datasource.go b/ec/ecdatasource/privatelinkdatasource/gcp_datasource.go new file mode 100644 index 000000000..061f1b270 --- /dev/null +++ b/ec/ecdatasource/privatelinkdatasource/gcp_datasource.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package privatelinkdatasource + +import ( + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// GcpDataSource returns the ec_gcp_privateserviceconnect_endpoint data source schema. +func GcpDataSource() *schema.Resource { + return &schema.Resource{ + ReadContext: readContextFor(provider{ + name: "gcp", + populateResource: populateGcpResource, + }), + + Schema: newGcpSchema(), + + Timeouts: &schema.ResourceTimeout{ + Default: schema.DefaultTimeout(5 * time.Minute), + }, + } +} + +func newGcpSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Required: true, + }, + + // Computed + "service_attachment_uri": { + Type: schema.TypeString, + Computed: true, + }, + "domain_name": { + Type: schema.TypeString, + Computed: true, + }, + } +} + +func populateGcpResource(regionData map[string]interface{}, d *schema.ResourceData) error { + if err := copyToStateAs[string]("service_attachment_uri", regionData, d); err != nil { + return err + } + + if err := copyToStateAs[string]("domain_name", regionData, d); err != nil { + return err + } + + return nil +} diff --git a/ec/ecdatasource/privatelinkdatasource/gcp_datasource_test.go b/ec/ecdatasource/privatelinkdatasource/gcp_datasource_test.go new file mode 100644 index 000000000..29c8777e1 --- /dev/null +++ b/ec/ecdatasource/privatelinkdatasource/gcp_datasource_test.go 
@@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package privatelinkdatasource + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" +) + +func Test_GcpDataSource_ReadContext(t *testing.T) { + tests := []struct { + name string + region string + diag diag.Diagnostics + endpoint *schema.ResourceData + }{ + { + name: "invalid region returns unknown region error", + region: "unknown", + diag: diag.FromErr(fmt.Errorf("%w: unknown", errUnknownRegion)), + endpoint: util.NewResourceData(t, util.ResDataParams{ + ID: "myID", + State: map[string]interface{}{ + "id": "myID", + "region": "unknown", + }, + Schema: newGcpSchema(), + }), + }, + { + name: "valid region returns endpoint", + region: "us-central1", + endpoint: util.NewResourceData(t, util.ResDataParams{ + ID: "myID", + State: map[string]interface{}{ + "id": "myID", + "region": "us-central1", + "service_attachment_uri": "projects/cloud-production-168820/regions/us-central1/serviceAttachments/proxy-psc-production-us-central1-v1-attachment", 
"domain_name": "psc.us-central1.gcp.cloud.es.io", + }, + Schema: newGcpSchema(), + }), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + deploymentsSchemaArg := schema.TestResourceDataRaw(t, newGcpSchema(), nil) + deploymentsSchemaArg.SetId("myID") + _ = deploymentsSchemaArg.Set("region", tt.region) + + source := GcpDataSource() + + d := source.ReadContext(context.Background(), deploymentsSchemaArg, nil) + if tt.diag != nil { + assert.Equal(t, d, tt.diag) + } else { + assert.Nil(t, d) + } + + assert.Equal(t, tt.endpoint.State().Attributes, deploymentsSchemaArg.State().Attributes) + }) + } +} diff --git a/ec/ecdatasource/privatelinkdatasource/regionPrivateLinkMap.json b/ec/ecdatasource/privatelinkdatasource/regionPrivateLinkMap.json new file mode 100644 index 000000000..b2df6979d --- /dev/null +++ b/ec/ecdatasource/privatelinkdatasource/regionPrivateLinkMap.json @@ -0,0 +1,315 @@ +{ + "aws": { + "af-south-1": { + "vpc_service_name": "com.amazonaws.vpce.af-south-1.vpce-svc-0d3d7b74f60a6c32c", + "domain_name": "vpce.af-south-1.aws.elastic-cloud.com", + "zone_ids": [ + "afs1-az1", + "afs1-az2", + "afs1-az3" + ] + }, + "ap-east-1": { + "vpc_service_name": "com.amazonaws.vpce.ap-east-1.vpce-svc-0f96fbfaf55558d5c", + "domain_name": "vpce-ap-east-1.aws.elastic-cloud.com", + "zone_ids": [ + "ape1-az1", + "ape1-az2", + "ape1-az3" + ] + }, + "ap-northeast-1": { + "vpc_service_name": "com.amazonaws.vpce.ap-northeast-1.vpce-svc-0e1046d7b48d5cf5f", + "domain_name": "vpce.ap-northeast-1.aws.elastic-cloud.com", + "zone_ids": [ + "apne1-az1", + "apne1-az2", + "apne1-az4" + ] + }, + "ap-northeast-2": { + "vpc_service_name": "com.amazonaws.vpce.ap-northeast-2.vpce-svc-0d90cf62dae682b84", + "domain_name": "vpce.ap-northeast-2.aws.elastic-cloud.com", + "zone_ids": [ + "apne2-az1", + "apne2-az2", + "apne2-az3" + ] + }, + "ap-south-1": { + "vpc_service_name": "com.amazonaws.vpce.ap-south-1.vpce-svc-0e9c1ae5caa269d1b", + "domain_name": 
"vpce.ap-south-1.aws.elastic-cloud.com", + "zone_ids": [ + "aps1-az1", + "aps1-az2", + "aps1-az3" + ] + }, + "ap-southeast-1": { + "vpc_service_name": "com.amazonaws.vpce.ap-southeast-1.vpce-svc-0cbc6cb9bdb683a95", + "domain_name": "vpce.ap-southeast-1.aws.elastic-cloud.com", + "zone_ids": [ + "apse1-az1", + "apse1-az2", + "apse1-az3" + ] + }, + "ap-southeast-2": { + "vpc_service_name": "com.amazonaws.vpce.ap-southeast-2.vpce-svc-0cde7432c1436ef13", + "domain_name": "vpce.ap-southeast-2.aws.elastic-cloud.com", + "zone_ids": [ + "apse2-az1", + "apse2-az2", + "apse2-az3" + ] + }, + "ca-central-1": { + "vpc_service_name": "com.amazonaws.vpce.ca-central-1.vpce-svc-0d3e69dd6dd336c28", + "domain_name": "vpce.ca-central-1.aws.elastic-cloud.com", + "zone_ids": [ + "cac1-az1", + "cac1-az2", + "cac1-az4" + ] + }, + "eu-central-1": { + "vpc_service_name": "com.amazonaws.vpce.eu-central-1.vpce-svc-081b2960e915a0861", + "domain_name": "vpce.eu-central-1.aws.elastic-cloud.com", + "zone_ids": [ + "euc1-az1", + "euc1-az2", + "euc1-az3" + ] + }, + "eu-south-1": { + "vpc_service_name": "com.amazonaws.vpce.eu-south-1.vpce-svc-03d8fc8a66a755237", + "domain_name": "vpce.eu-south-1.aws.elastic-cloud.com", + "zone_ids": [ + "eus1-az1", + "eus1-az2", + "eus1-az3" + ] + }, + "eu-west-1": { + "vpc_service_name": "com.amazonaws.vpce.eu-west-1.vpce-svc-01f2afe87944eb12b", + "domain_name": "vpce.eu-west-1.aws.elastic-cloud.com", + "zone_ids": [ + "euw1-az1", + "euw1-az2", + "euw1-az3" + ] + }, + "eu-west-2": { + "vpc_service_name": "com.amazonaws.vpce.eu-west-2.vpce-svc-0e42a2c194c97a1d0", + "domain_name": "vpce.eu-west-2.aws.elastic-cloud.com", + "zone_ids": [ + "euw2-az1", + "euw2-az2", + "euw2-az3" + ] + }, + "eu-west-3": { + "vpc_service_name": "com.amazonaws.vpce.eu-west-3.vpce-svc-0d6912d10db9693d1", + "domain_name": "vpce.eu-west-3.aws.elastic-cloud.com", + "zone_ids": [ + "euw3-az1", + "euw3-az2", + "euw3-az3" + ] + }, + "me-south-1": { + "vpc_service_name": 
"com.amazonaws.vpce.me-south-1.vpce-svc-0381de3eb670dcb48", + "domain_name": "vpce.me-south-1.aws.elastic-cloud.com", + "zone_ids": [ + "mes1-az1", + "mes1-az2", + "mes1-az3" + ] + }, + "sa-east-1": { + "vpc_service_name": "com.amazonaws.vpce.sa-east-1.vpce-svc-0b2dbce7e04dae763", + "domain_name": "vpce.sa-east-1.aws.elastic-cloud.com", + "zone_ids": [ + "sae1-az1", + "sae1-az2", + "sae1-az3" + ] + }, + "us-east-1": { + "vpc_service_name": "com.amazonaws.vpce.us-east-1.vpce-svc-0e42e1e06ed010238", + "domain_name": "vpce.us-east-1.aws.elastic-cloud.com", + "zone_ids": [ + "use1-az2", + "use1-az4", + "use1-az6" + ] + }, + "us-east-2": { + "vpc_service_name": "com.amazonaws.vpce.us-east-2.vpce-svc-02d187d2849ffb478", + "domain_name": "vpce.us-east-2.aws.elastic-cloud.com", + "zone_ids": [ + "use2-az1", + "use2-az2", + "use2-az3" + ] + }, + "us-west-1": { + "vpc_service_name": "com.amazonaws.vpce.us-west-1.vpce-svc-00def4a16a26cb1b4", + "domain_name": "vpce.us-west-1.aws.elastic-cloud.com", + "zone_ids": [ + "usw1-az1", + "usw1-az3" + ] + }, + "us-west-2": { + "vpc_service_name": "com.amazonaws.vpce.us-west-2.vpce-svc-0e69febae1fb91870", + "domain_name": "vpce.us-west-2.aws.elastic-cloud.com", + "zone_ids": [ + "usw2-az1", + "usw2-az2", + "usw2-az3" + ] + }, + "us-gov-east-1": { + "vpc_service_name": "com.amazonaws.vpce.us-gov-east-1.vpce-svc-0bba5ffa04f0cb26d", + "domain_name": "vpce.us-gov-east-1.aws.elastic-cloud.com", + "zone_ids": [] + } + }, + "gcp": { + "asia-east1": { + "service_attachment_uri": "projects/cloud-production-168820/regions/asia-east1/serviceAttachments/proxy-psc-production-asia-east1-v1-attachment", + "domain_name": "psc.asia-east1.gcp.elastic-cloud.com" + }, + "asia-northeast1": { + "service_attachment_uri": "projects/cloud-production-168820/regions/asia-northeast1/serviceAttachments/proxy-psc-production-asia-northeast1-v1-attachment", + "domain_name": "psc.asia-northeast1.gcp.cloud.es.io" + }, + "asia-northeast3": { + "service_attachment_uri": 
"projects/cloud-production-168820/regions/asia-northeast3/serviceAttachments/proxy-psc-production-asia-northeast3-v1-attachment", + "domain_name": "psc.asia-northeast3.gcp.elastic-cloud.com" + }, + "asia-south1": { + "service_attachment_uri": "projects/cloud-production-168820/regions/asia-south1/serviceAttachments/proxy-psc-production-asia-south1-v1-attachment", + "domain_name": "psc.asia-south1.gcp.elastic-cloud.com" + }, + "asia-southeast1": { + "service_attachment_uri": "projects/cloud-production-168820/regions/asia-southeast1/serviceAttachments/proxy-psc-production-asia-southeast1-v1-attachment", + "domain_name": "psc.asia-southeast1.gcp.elastic-cloud.com" + }, + "australia-southeast1": { + "service_attachment_uri": "projects/cloud-production-168820/regions/australia-southeast1/serviceAttachments/proxy-psc-production-australia-southeast1-v1-attachment", + "domain_name": "psc.australia-southeast1.gcp.elastic-cloud.com" + }, + "europe-north1": { + "service_attachment_uri": "projects/cloud-production-168820/regions/europe-north1/serviceAttachments/proxy-psc-production-europe-north1-v1-attachment", + "domain_name": "psc.europe-north1.gcp.elastic-cloud.com" + }, + "europe-west1": { + "service_attachment_uri": "projects/cloud-production-168820/regions/europe-west1/serviceAttachments/proxy-psc-production-europe-west1-v1-attachment", + "domain_name": "psc.europe-west1.gcp.cloud.es.io" + }, + "europe-west2": { + "service_attachment_uri": "projects/cloud-production-168820/regions/europe-west2/serviceAttachments/proxy-psc-production-europe-west2-v1-attachment", + "domain_name": "psc.europe-west2.gcp.elastic-cloud.com" + }, + "europe-west3": { + "service_attachment_uri": "projects/cloud-production-168820/regions/europe-west3/serviceAttachments/proxy-psc-production-europe-west3-v1-attachment", + "domain_name": "psc.europe-west3.gcp.cloud.es.io" + }, + "europe-west4": { + "service_attachment_uri": 
"projects/cloud-production-168820/regions/europe-west4/serviceAttachments/proxy-psc-production-europe-west4-v1-attachment", + "domain_name": "psc.europe-west4.gcp.elastic-cloud.com" + }, + "northamerica-northeast1": { + "service_attachment_uri": "projects/cloud-production-168820/regions/northamerica-northeast1/serviceAttachments/proxy-psc-production-northamerica-northeast1-v1-attachment", + "domain_name": "psc.northamerica-northeast1.gcp.elastic-cloud.com" + }, + "southamerica-east1": { + "service_attachment_uri": "projects/cloud-production-168820/regions/southamerica-east1/serviceAttachments/proxy-psc-production-southamerica-east1-v1-attachment", + "domain_name": "psc.southamerica-east1.gcp.elastic-cloud.com" + }, + "us-central1": { + "service_attachment_uri": "projects/cloud-production-168820/regions/us-central1/serviceAttachments/proxy-psc-production-us-central1-v1-attachment", + "domain_name": "psc.us-central1.gcp.cloud.es.io" + }, + "us-east1": { + "service_attachment_uri": "projects/cloud-production-168820/regions/us-east1/serviceAttachments/proxy-psc-production-us-east1-v1-attachment", + "domain_name": "psc.us-east1.gcp.elastic-cloud.com" + }, + "us-east4": { + "service_attachment_uri": "projects/cloud-production-168820/regions/us-east4/serviceAttachments/proxy-psc-production-us-east4-v1-attachment", + "domain_name": "psc.us-east4.gcp.elastic-cloud.com" + }, + "us-west1": { + "service_attachment_uri": "projects/cloud-production-168820/regions/us-west1/serviceAttachments/proxy-psc-production-us-west1-v1-attachment", + "domain_name": "psc.us-west1.gcp.cloud.es.io" + } + }, + "azure": { + "australiaeast": { + "service_alias": "australiaeast-prod-012-privatelink-service.a0cf0c1a-33ab-4528-81e7-9cb23608f94e.australiaeast.azure.privatelinkservice", + "domain_name": "privatelink.australiaeast.azure.elastic-cloud.com" + }, + "centralus": { + "service_alias": 
"centralus-prod-009-privatelink-service.49a041f7-2ad1-4bd2-9898-fba7f7a1ff77.centralus.azure.privatelinkservice", + "domain_name": "privatelink.centralus.azure.elastic-cloud.com" + }, + "eastus2": { + "service_alias": "eastus2-prod-002-privatelink-service.64359fdd-7893-4215-9929-ece3287e1371.eastus2.azure.privatelinkservice", + "domain_name": "privatelink.eastus2.azure.elastic-cloud.com" + }, + "francecentral": { + "service_alias": "francecentral-prod-008-privatelink-service.8ab667fd-e8af-44b2-a347-bd48d109afec.francecentral.azure.privatelinkservice", + "domain_name": "privatelink.francecentral.azure.elastic-cloud.com" + }, + "japaneast": { + "service_alias": "japaneast-prod-006-privatelink-service.cfcf2172-917a-4260-b002-3e7183e56fd0.japaneast.azure.privatelinkservice", + "domain_name": "privatelink.japaneast.azure.elastic-cloud.com" + }, + "northeurope": { + "service_alias": "northeurope-prod-005-privatelink-service.163e4238-bdde-4a0b-a812-04650bfa41c4.northeurope.azure.privatelinkservice", + "domain_name": "privatelink.northeurope.azure.elastic-cloud.com" + }, + "southeastasia": { + "service_alias": "southeastasia-prod-004-privatelink-service.20d67dc0-2a36-40a0-af8d-0e1f997a419d.southeastasia.azure.privatelinkservice", + "domain_name": "privatelink.southeastasia.azure.elastic-cloud.com" + }, + "uksouth": { + "service_alias": "uksouth-prod-007-privatelink-service.98758729-06f7-438d-baaa-0cb63e737cdf.uksouth.azure.privatelinkservice", + "domain_name": "privatelink.uksouth.azure.elastic-cloud.com" + }, + "westeurope": { + "service_alias": "westeurope-prod-001-privatelink-service.190cd496-6d79-4ee2-8f23-0667fd5a8ec1.westeurope.azure.privatelinkservice", + "domain_name": "privatelink.westeurope.azure.elastic-cloud.com" + }, + "westus2": { + "service_alias": "westus2-prod-003-privatelink-service.b9c176b8-4fe9-41f9-916c-67cacd753ca1.westus2.azure.privatelinkservice", + "domain_name": "privatelink.westus2.azure.elastic-cloud.com" + }, + "eastus": { + "service_alias": 
"eastus-prod-010-privatelink-service.b5765cd8-1fc8-45e9-91fc-a9b208369f9a.eastus.azure.privatelinkservice", + "domain_name": "privatelink.eastus2.azure.elastic-cloud.com" + }, + "southcentralus": { + "service_alias": "southcentralus-prod-013-privatelink-service.f8030986-5fb9-4b0e-8463-69604233b07e.southcentralus.azure.privatelinkservice", + "domain_name": "privatelink.southcentralus.azure.elastic-cloud.com" + }, + "canadacentral": { + "service_alias": "canadacentral-prod-011-privatelink-service.203896f1-da53-4c40-b7db-0ba4e17a1019.canadacentral.azure.privatelinkservice", + "domain_name": "privatelink.canadacentral.azure.elastic-cloud.com" + }, + "brazilsouth": { + "service_alias": "brazilsouth-prod-014-privatelink-service.05813ca4-cd0f-4692-ad69-a339d023f666.brazilsouth.azure.privatelinkservice", + "domain_name": "privatelink.brazilsouth.azure.elastic-cloud.com" + }, + "centralindia": { + "service_alias": "centralindia-prod-016-privatelink-service.071806ca-8101-425b-ae86-737935a719d3.centralindia.azure.privatelinkservice", + "domain_name": "privatelink.centralindia.azure.elastic-cloud.com" + }, + "southafricanorth": { + "service_alias": "southafricanorth-prod-015-privatelink-service.b443098d-6382-42aa-9025-e0cd3ec9c103.southafricanorth.azure.privatelinkservice", + "domain_name": "privatelink.southafricanorth.azure.elastic-cloud.com" + } + } +} diff --git a/ec/ecresource/deploymentresource/apm/v1/apm.go b/ec/ecresource/deploymentresource/apm/v1/apm.go new file mode 100644 index 000000000..687aeb530 --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v1/apm.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ApmTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + Topology types.List `tfsdk:"topology"` + Config types.List `tfsdk:"config"` +} + +type Apm struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + Topology v1.Topologies `tfsdk:"topology"` + Config ApmConfigs `tfsdk:"config"` +} + +type Apms []Apm diff --git a/ec/ecresource/deploymentresource/apm/v1/apm_config.go b/ec/ecresource/deploymentresource/apm/v1/apm_config.go new file mode 100644 index 000000000..5f82dbf0c --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v1/apm_config.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ApmConfigTF struct { + DockerImage types.String `tfsdk:"docker_image"` + DebugEnabled types.Bool `tfsdk:"debug_enabled"` + UserSettingsJson types.String `tfsdk:"user_settings_json"` + UserSettingsOverrideJson types.String `tfsdk:"user_settings_override_json"` + UserSettingsYaml types.String `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml types.String `tfsdk:"user_settings_override_yaml"` +} + +type ApmConfig struct { + DockerImage *string `tfsdk:"docker_image"` + DebugEnabled *bool `tfsdk:"debug_enabled"` + UserSettingsJson *string `tfsdk:"user_settings_json"` + UserSettingsOverrideJson *string `tfsdk:"user_settings_override_json"` + UserSettingsYaml *string `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml *string `tfsdk:"user_settings_override_yaml"` +} + +type ApmConfigs []ApmConfig diff --git a/ec/ecresource/deploymentresource/apm/v1/schema.go b/ec/ecresource/deploymentresource/apm/v1/schema.go new file mode 100644 index 000000000..18baf804a --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v1/schema.go @@ -0,0 +1,184 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func ApmTopologySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional topology attribute", + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + resource.UseStateForUnknown(), + }, + }, + "zone_count": { + Type: types.Int64Type, + Computed: true, + Optional: true, + PlanModifiers: 
[]tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + }), + } +} + +func ApmConfigSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: `Optionally define the Apm configuration options for the APM Server`, + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + // TODO + // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the APM nodes will use. Note that this field will only work for internal users only.", + Optional: true, + }, + "debug_enabled": { + Type: types.BoolType, + Description: `Optionally enable debug mode for APM servers - defaults to false`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: false}), + resource.UseStateForUnknown(), + }, + }, + "user_settings_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + }), + } +} + +func ApmSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional APM resource definition", + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + // resource.UseStateForUnknown(), + // planmodifier.UseStateForNoChange(), + }, + }, + "ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-apm"}), + // resource.UseStateForUnknown(), + // planmodifier.UseStateForNoChange(), + }, + }, + "resource_id": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + // 
resource.UseStateForUnknown(), + // planmodifier.UseStateForNoChange(), + }, + }, + "region": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + // resource.UseStateForUnknown(), + // planmodifier.UseStateForNoChange(), + }, + }, + "http_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + // resource.UseStateForUnknown(), + // planmodifier.UseStateForNoChange(), + }, + }, + "https_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + // resource.UseStateForUnknown(), + // planmodifier.UseStateForNoChange(), + }, + }, + "topology": ApmTopologySchema(), + "config": ApmConfigSchema(), + }), + } +} diff --git a/ec/ecresource/deploymentresource/apm/v2/apm.go b/ec/ecresource/deploymentresource/apm/v2/apm.go new file mode 100644 index 000000000..cad680c1f --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v2/apm.go @@ -0,0 +1,199 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/models" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v1" + topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ApmTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + Config types.Object `tfsdk:"config"` +} + +type Apm struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + Config *ApmConfig `tfsdk:"config"` +} + +func ReadApms(in []*models.ApmResourceInfo) (*Apm, error) { + for _, model := range in { + if util.IsCurrentApmPlanEmpty(model) || utils.IsApmResourceStopped(model) { + continue + } + + apm, err := ReadApm(model) + if err 
!= nil { + return nil, err + } + + return apm, nil + } + + return nil, nil +} + +func ReadApm(in *models.ApmResourceInfo) (*Apm, error) { + var apm Apm + + apm.RefId = in.RefID + + apm.ResourceId = in.Info.ID + + apm.Region = in.Region + + plan := in.Info.PlanInfo.Current.Plan + + topologies, err := ReadApmTopologies(plan.ClusterTopology) + if err != nil { + return nil, err + } + + if len(topologies) > 0 { + apm.InstanceConfigurationId = topologies[0].InstanceConfigurationId + apm.Size = topologies[0].Size + apm.SizeResource = topologies[0].SizeResource + apm.ZoneCount = topologies[0].ZoneCount + } + + apm.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID + + apm.HttpEndpoint, apm.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) + + configs, err := readApmConfigs(plan.Apm) + if err != nil { + return nil, err + } + + if len(configs) > 0 { + apm.Config = &configs[0] + } + + return &apm, nil +} + +func (apm ApmTF) Payload(ctx context.Context, payload models.ApmPayload) (*models.ApmPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + if !apm.ElasticsearchClusterRefId.IsNull() { + payload.ElasticsearchClusterRefID = &apm.ElasticsearchClusterRefId.Value + } + + if !apm.RefId.IsNull() { + payload.RefID = &apm.RefId.Value + } + + if apm.Region.Value != "" { + payload.Region = &apm.Region.Value + } + + if !apm.Config.IsNull() && !apm.Config.IsUnknown() { + var cfg v1.ApmConfigTF + + ds := tfsdk.ValueAs(ctx, apm.Config, &cfg) + + diags.Append(ds...) + + if !ds.HasError() { + diags.Append(apmConfigPayload(ctx, cfg, payload.Plan.Apm)...) + } + } + + topology := topologyv1.TopologyTF{ + InstanceConfigurationId: apm.InstanceConfigurationId, + Size: apm.Size, + SizeResource: apm.SizeResource, + ZoneCount: apm.ZoneCount, + } + + topologyPayload, ds := apmTopologyPayload(ctx, topology, defaultApmTopology(payload.Plan.ClusterTopology), 0) + + diags.Append(ds...) 
+ + if !ds.HasError() && topologyPayload != nil { + payload.Plan.ClusterTopology = []*models.ApmTopologyElement{topologyPayload} + } + + return &payload, diags +} + +func ApmPayload(ctx context.Context, apmObj types.Object, template *models.DeploymentTemplateInfoV2) (*models.ApmPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + var apm *ApmTF + + if diags = tfsdk.ValueAs(ctx, apmObj, &apm); diags.HasError() { + return nil, diags + } + + if apm == nil { + return nil, nil + } + + templatePayload := ApmResource(template) + + if templatePayload == nil { + diags.AddError("apm payload error", "apm specified but deployment template is not configured for it. Use a different template if you wish to add apm") + return nil, diags + } + + payload, diags := apm.Payload(ctx, *templatePayload) + + if diags.HasError() { + return nil, diags + } + + return payload, nil +} + +// ApmResource returns the ApmPayload from a deployment +// template or an empty version of the payload. +func ApmResource(template *models.DeploymentTemplateInfoV2) *models.ApmPayload { + if template == nil || len(template.DeploymentTemplate.Resources.Apm) == 0 { + return nil + } + return template.DeploymentTemplate.Resources.Apm[0] +} diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_config.go b/ec/ecresource/deploymentresource/apm/v2/apm_config.go new file mode 100644 index 000000000..dc6dc197a --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v2/apm_config.go @@ -0,0 +1,109 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v1" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +type ApmConfig = v1.ApmConfig + +func readApmConfigs(in *models.ApmConfiguration) (v1.ApmConfigs, error) { + var cfg ApmConfig + + if in.UserSettingsYaml != "" { + cfg.UserSettingsYaml = &in.UserSettingsYaml + } + + if in.UserSettingsOverrideYaml != "" { + cfg.UserSettingsOverrideYaml = &in.UserSettingsOverrideYaml + } + + if o := in.UserSettingsJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsJson = ec.String(string(b)) + } + } + + if o := in.UserSettingsOverrideJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsOverrideJson = ec.String(string(b)) + } + } + + if in.DockerImage != "" { + cfg.DockerImage = &in.DockerImage + } + + if in.SystemSettings != nil { + if in.SystemSettings.DebugEnabled != nil { + cfg.DebugEnabled = in.SystemSettings.DebugEnabled + } + } + + if cfg == (ApmConfig{}) { + return nil, nil + } + + return v1.ApmConfigs{cfg}, nil +} + +func apmConfigPayload(ctx context.Context, cfg v1.ApmConfigTF, model *models.ApmConfiguration) diag.Diagnostics { + if !cfg.DebugEnabled.IsNull() { + if model.SystemSettings == nil { + model.SystemSettings = &models.ApmSystemSettings{} + } + 
model.SystemSettings.DebugEnabled = &cfg.DebugEnabled.Value + } + + var diags diag.Diagnostics + if cfg.UserSettingsJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsJson.Value), &model.UserSettingsJSON); err != nil { + diags.AddError("failed expanding apm user_settings_json", err.Error()) + return diags + } + } + + if cfg.UserSettingsOverrideJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsOverrideJson.Value), &model.UserSettingsOverrideJSON); err != nil { + diags.AddError("failed expanding apm user_settings_override_json", err.Error()) + return diags + } + } + + if !cfg.UserSettingsYaml.IsNull() { + model.UserSettingsYaml = cfg.UserSettingsYaml.Value + } + + if !cfg.UserSettingsOverrideYaml.IsNull() { + model.UserSettingsOverrideYaml = cfg.UserSettingsOverrideYaml.Value + } + + if !cfg.DockerImage.IsNull() { + model.DockerImage = cfg.DockerImage.Value + } + + return nil +} diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_payload_test.go b/ec/ecresource/deploymentresource/apm/v2/apm_payload_test.go new file mode 100644 index 000000000..b264918dd --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v2/apm_payload_test.go @@ -0,0 +1,260 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v1" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" +) + +func Test_ApmPayload(t *testing.T) { + tplPath := "../../testdata/template-aws-io-optimized-v2.json" + tpl := func() *models.DeploymentTemplateInfoV2 { + return testutil.ParseDeploymentTemplate(t, tplPath) + } + type args struct { + apm *Apm + tpl *models.DeploymentTemplateInfoV2 + } + tests := []struct { + name string + args args + want *models.ApmPayload + diags diag.Diagnostics + }{ + { + name: "returns nil when there's no resources", + }, + { + name: "parses an APM resource with explicit topology", + args: args{ + tpl: tpl(), + apm: &Apm{ + RefId: ec.String("main-apm"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + want: &models.ApmPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + }}, + }, + }, + }, + { + name: "parses an 
APM resource with invalid instance_configuration_id", + args: args{ + tpl: tpl(), + apm: &Apm{ + RefId: ec.String("main-apm"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("so invalid"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError( + "cannot match topology element", + `apm topology: invalid instance_configuration_id: "so invalid" doesn't match any of the deployment template instance configurations`, + ) + return diags + }(), + }, + { + name: "parses an APM resource with no topology", + args: args{ + tpl: tpl(), + apm: &Apm{ + RefId: ec.String("main-apm"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + }, + }, + want: &models.ApmPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + { + name: "parses an APM resource with a topology element but no instance_configuration_id", + args: args{ + tpl: tpl(), + apm: &Apm{ + RefId: ec.String("main-apm"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + }, + }, + want: &models.ApmPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + 
ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + }}, + }, + }, + }, + { + name: "parses an APM resource with explicit topology and some config", + args: args{ + tpl: tpl(), + apm: &Apm{ + RefId: ec.String("tertiary-apm"), + ElasticsearchClusterRefId: ec.String("somerefid"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + Config: &v1.ApmConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\": \"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\": \"value2\"}"), + DebugEnabled: ec.Bool(true), + }, + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + want: &models.ApmPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("tertiary-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{ + UserSettingsYaml: `some.setting: value`, + UserSettingsOverrideYaml: `some.setting: value2`, + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "value2", + }, + SystemSettings: &models.ApmSystemSettings{ + DebugEnabled: ec.Bool(true), + }, + }, + ClusterTopology: []*models.ApmTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + }, + }, + }, + }, + }, + { + name: "tries to parse an apm resource when the template doesn't have an APM instance set.", + args: args{ + tpl: nil, + apm: &Apm{ + RefId: ec.String("tertiary-apm"), + ElasticsearchClusterRefId: ec.String("somerefid"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + 
InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + Config: &v1.ApmConfig{ + DebugEnabled: ec.Bool(true), + }, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("apm payload error", "apm specified but deployment template is not configured for it. Use a different template if you wish to add apm") + return diags + }(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var apm types.Object + diags := tfsdk.ValueFrom(context.Background(), tt.args.apm, ApmSchema().FrameworkType(), &apm) + assert.Nil(t, diags) + + if got, diags := ApmPayload(context.Background(), apm, tt.args.tpl); tt.diags != nil { + assert.Equal(t, tt.diags, diags) + } else { + assert.Nil(t, diags) + assert.Equal(t, tt.want, got) + } + }) + } +} diff --git a/ec/ecresource/deploymentresource/apm_flatteners_test.go b/ec/ecresource/deploymentresource/apm/v2/apm_read_test.go similarity index 69% rename from ec/ecresource/deploymentresource/apm_flatteners_test.go rename to ec/ecresource/deploymentresource/apm/v2/apm_read_test.go index 94441116b..a4f974c18 100644 --- a/ec/ecresource/deploymentresource/apm_flatteners_test.go +++ b/ec/ecresource/deploymentresource/apm/v2/apm_read_test.go @@ -15,32 +15,39 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package v2 import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v1" ) -func Test_flattenApmResource(t *testing.T) { +func Test_readApm(t *testing.T) { type args struct { - in []*models.ApmResourceInfo - name string + in []*models.ApmResourceInfo } + tests := []struct { - name string - args args - want []interface{} + name string + args args + want *Apm + diags diag.Diagnostics }{ { - name: "empty resource list returns empty list", - args: args{in: []*models.ApmResourceInfo{}}, - want: []interface{}{}, + name: "empty resource list returns empty list", + args: args{in: []*models.ApmResourceInfo{}}, + want: nil, + diags: nil, }, { name: "empty current plan returns empty list", @@ -53,7 +60,8 @@ func Test_flattenApmResource(t *testing.T) { }, }, }}, - want: []interface{}{}, + want: nil, + diags: nil, }, { name: "parses the apm resource", @@ -94,23 +102,17 @@ func Test_flattenApmResource(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://apmresource.cloud.elastic.co:9200", - "https_endpoint": "https://apmresource.cloud.elastic.co:9243", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.apm.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }, - }, - }, + want: &Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: 
&mock.ValidClusterID, + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://apmresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://apmresource.cloud.elastic.co:9243"), + InstanceConfigurationId: ec.String("aws.apm.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, }, }, { @@ -205,26 +207,24 @@ func Test_flattenApmResource(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://apmresource.cloud.elastic.co:9200", - "https_endpoint": "https://apmresource.cloud.elastic.co:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }}, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - }}, - }}, + want: &Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://apmresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://apmresource.cloud.elastic.co:9243"), + InstanceConfigurationId: ec.String("aws.apm.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + Config: &v1.ApmConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + }, }, { name: "parses the apm resource with config overrides 
and system settings", @@ -276,34 +276,36 @@ func Test_flattenApmResource(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://apmresource.cloud.elastic.co:9200", - "https_endpoint": "https://apmresource.cloud.elastic.co:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }}, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - - "debug_enabled": true, - }}, - }}, + want: &Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://apmresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://apmresource.cloud.elastic.co:9243"), + InstanceConfigurationId: ec.String("aws.apm.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + Config: &v1.ApmConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + DebugEnabled: ec.Bool(true), + }, + }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenApmResources(tt.args.in, tt.args.name) - assert.Equal(t, tt.want, got) + apms, err := ReadApms(tt.args.in) + assert.Nil(t, err) + assert.Equal(t, tt.want, apms) + + var apmTF types.Object + diags := tfsdk.ValueFrom(context.Background(), 
apms, ApmSchema().FrameworkType(), &apmTF) + assert.Nil(t, diags) }) } } diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_topology.go b/ec/ecresource/deploymentresource/apm/v2/apm_topology.go new file mode 100644 index 000000000..444a9b527 --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v2/apm_topology.go @@ -0,0 +1,135 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "fmt" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +const ( + minimumApmSize = 512 +) + +func ReadApmTopology(in *models.ApmTopologyElement) (*v1.Topology, error) { + var top v1.Topology + + if in.InstanceConfigurationID != "" { + top.InstanceConfigurationId = &in.InstanceConfigurationID + } + + if in.Size != nil { + top.Size = ec.String(util.MemoryToState(*in.Size.Value)) + top.SizeResource = ec.String(*in.Size.Resource) + } + + top.ZoneCount = int(in.ZoneCount) + + return &top, nil +} + +func ReadApmTopologies(in []*models.ApmTopologyElement) (v1.Topologies, error) { + topologies := make([]v1.Topology, 0, len(in)) + + for _, model := range in { + if model.Size == nil || model.Size.Value == nil || *model.Size.Value == 0 { + continue + } + + topology, err := ReadApmTopology(model) + if err != nil { + return nil, nil + } + + topologies = append(topologies, *topology) + } + + return topologies, nil +} + +// defaultApmTopology iterates over all the templated topology elements and +// sets the size to the default when the template size is smaller than the +// deployment template default, the same is done on the ZoneCount. 
+func defaultApmTopology(topology []*models.ApmTopologyElement) []*models.ApmTopologyElement { + for _, t := range topology { + if *t.Size.Value < minimumApmSize { + t.Size.Value = ec.Int32(minimumApmSize) + } + if t.ZoneCount < utils.MinimumZoneCount { + t.ZoneCount = utils.MinimumZoneCount + } + } + + return topology +} + +func apmTopologyPayload(ctx context.Context, topology v1.TopologyTF, planModels []*models.ApmTopologyElement, index int) (*models.ApmTopologyElement, diag.Diagnostics) { + + icID := topology.InstanceConfigurationId.Value + + // When a topology element is set but no instance_configuration_id + // is set, then obtain the instance_configuration_id from the topology + // element. + if icID == "" && index < len(planModels) { + icID = planModels[index].InstanceConfigurationID + } + + size, err := converters.ParseTopologySizeTF(topology.Size, topology.SizeResource) + + var diags diag.Diagnostics + if err != nil { + diags.AddError("size parsing error", err.Error()) + return nil, diags + } + + topologyElem, err := matchApmTopology(icID, planModels) + if err != nil { + diags.AddError("cannot match topology element", err.Error()) + return nil, diags + } + + if size != nil { + topologyElem.Size = size + } + + if topology.ZoneCount.Value > 0 { + topologyElem.ZoneCount = int32(topology.ZoneCount.Value) + } + + return topologyElem, nil +} + +func matchApmTopology(id string, topologies []*models.ApmTopologyElement) (*models.ApmTopologyElement, error) { + for _, t := range topologies { + if t.InstanceConfigurationID == id { + return t, nil + } + } + return nil, fmt.Errorf( + `apm topology: invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, + id, + ) +} diff --git a/ec/ecresource/deploymentresource/apm/v2/schema.go b/ec/ecresource/deploymentresource/apm/v2/schema.go new file mode 100644 index 000000000..5659352bf --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v2/schema.go @@ -0,0 +1,167 @@ +// 
Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func ApmConfigSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: `Optionally define the Apm configuration options for the APM Server`, + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + // TODO + // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the APM nodes will use. 
Note that this field will only work for internal users only.", + Optional: true, + }, + "debug_enabled": { + Type: types.BoolType, + Description: `Optionally enable debug mode for APM servers - defaults to false`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: false}), + resource.UseStateForUnknown(), + }, + }, + "user_settings_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + }), + } +} + +func ApmSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional APM resource definition", + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + // resource.UseStateForUnknown(), + // planmodifier.UseStateForNoChange(), + }, + }, + "ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-apm"}), + // resource.UseStateForUnknown(), + // planmodifier.UseStateForNoChange(), + }, + }, + "resource_id": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + // resource.UseStateForUnknown(), + // planmodifier.UseStateForNoChange(), + }, + }, + "region": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + // resource.UseStateForUnknown(), + // planmodifier.UseStateForNoChange(), + }, + }, + "http_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + // resource.UseStateForUnknown(), + // planmodifier.UseStateForNoChange(), + }, + }, 
+ "https_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + // resource.UseStateForUnknown(), + // planmodifier.UseStateForNoChange(), + }, + }, + "instance_configuration_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + resource.UseStateForUnknown(), + }, + }, + "zone_count": { + Type: types.Int64Type, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "config": ApmConfigSchema(), + }), + } +} diff --git a/ec/ecresource/deploymentresource/apm_expanders.go b/ec/ecresource/deploymentresource/apm_expanders.go deleted file mode 100644 index e2bd44b48..000000000 --- a/ec/ecresource/deploymentresource/apm_expanders.go +++ /dev/null @@ -1,207 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// expandApmResources expands apm resources into their models. -func expandApmResources(apms []interface{}, tpl *models.ApmPayload) ([]*models.ApmPayload, error) { - if len(apms) == 0 { - return nil, nil - } - - if tpl == nil { - return nil, errors.New("apm specified but deployment template is not configured for it. Use a different template if you wish to add apm") - } - - result := make([]*models.ApmPayload, 0, len(apms)) - for _, raw := range apms { - resResource, err := expandApmResource(raw, tpl) - if err != nil { - return nil, err - } - result = append(result, resResource) - } - - return result, nil -} - -func expandApmResource(raw interface{}, res *models.ApmPayload) (*models.ApmPayload, error) { - var apm = raw.(map[string]interface{}) - - if esRefID, ok := apm["elasticsearch_cluster_ref_id"]; ok { - res.ElasticsearchClusterRefID = ec.String(esRefID.(string)) - } - - if refID, ok := apm["ref_id"]; ok { - res.RefID = ec.String(refID.(string)) - } - - if region, ok := apm["region"]; ok { - if r := region.(string); r != "" { - res.Region = ec.String(r) - } - } - - if cfg, ok := apm["config"]; ok { - if err := expandApmConfig(cfg, res.Plan.Apm); err != nil { - return nil, err - } - } - - if rt, ok := apm["topology"]; ok && len(rt.([]interface{})) > 0 { - topology, err := expandApmTopology(rt, 
res.Plan.ClusterTopology) - if err != nil { - return nil, err - } - res.Plan.ClusterTopology = topology - } else { - res.Plan.ClusterTopology = defaultApmTopology(res.Plan.ClusterTopology) - } - - return res, nil -} - -func expandApmTopology(raw interface{}, topologies []*models.ApmTopologyElement) ([]*models.ApmTopologyElement, error) { - rawTopologies := raw.([]interface{}) - res := make([]*models.ApmTopologyElement, 0, len(rawTopologies)) - - for i, rawTop := range rawTopologies { - topology := rawTop.(map[string]interface{}) - var icID string - if id, ok := topology["instance_configuration_id"]; ok { - icID = id.(string) - } - // When a topology element is set but no instance_configuration_id - // is set, then obtain the instance_configuration_id from the topology - // element. - if t := defaultApmTopology(topologies); icID == "" && len(t) >= i { - icID = t[i].InstanceConfigurationID - } - - size, err := util.ParseTopologySize(topology) - if err != nil { - return nil, err - } - - elem, err := matchApmTopology(icID, topologies) - if err != nil { - return nil, err - } - if size != nil { - elem.Size = size - } - - if zones, ok := topology["zone_count"]; ok { - if z := zones.(int); z > 0 { - elem.ZoneCount = int32(z) - } - - } - - res = append(res, elem) - } - - return res, nil -} - -func expandApmConfig(raw interface{}, res *models.ApmConfiguration) error { - for _, rawCfg := range raw.([]interface{}) { - var cfg = rawCfg.(map[string]interface{}) - - if debugEnabled, ok := cfg["debug_enabled"]; ok { - if res.SystemSettings == nil { - res.SystemSettings = &models.ApmSystemSettings{} - } - res.SystemSettings.DebugEnabled = ec.Bool(debugEnabled.(bool)) - } - - if settings, ok := cfg["user_settings_json"]; ok && settings != nil { - if s, ok := settings.(string); ok && s != "" { - if err := json.Unmarshal([]byte(s), &res.UserSettingsJSON); err != nil { - return fmt.Errorf("failed expanding apm user_settings_json: %w", err) - } - } - } - if settings, ok := 
cfg["user_settings_override_json"]; ok && settings != nil { - if s, ok := settings.(string); ok && s != "" { - if err := json.Unmarshal([]byte(s), &res.UserSettingsOverrideJSON); err != nil { - return fmt.Errorf("failed expanding apm user_settings_override_json: %w", err) - } - } - } - if settings, ok := cfg["user_settings_yaml"]; ok { - res.UserSettingsYaml = settings.(string) - } - if settings, ok := cfg["user_settings_override_yaml"]; ok { - res.UserSettingsOverrideYaml = settings.(string) - } - - if v, ok := cfg["docker_image"]; ok { - res.DockerImage = v.(string) - } - } - - return nil -} - -// defaultApmTopology iterates over all the templated topology elements and -// sets the size to the default when the template size is smaller than the -// deployment template default, the same is done on the ZoneCount. -func defaultApmTopology(topology []*models.ApmTopologyElement) []*models.ApmTopologyElement { - for _, t := range topology { - if *t.Size.Value < minimumApmSize { - t.Size.Value = ec.Int32(minimumApmSize) - } - if t.ZoneCount < minimumZoneCount { - t.ZoneCount = minimumZoneCount - } - } - - return topology -} - -func matchApmTopology(id string, topologies []*models.ApmTopologyElement) (*models.ApmTopologyElement, error) { - for _, t := range topologies { - if t.InstanceConfigurationID == id { - return t, nil - } - } - return nil, fmt.Errorf( - `apm topology: invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, - id, - ) -} - -// apmResource returns the ApmPayload from a deployment -// template or an empty version of the payload. 
-func apmResource(res *models.DeploymentTemplateInfoV2) *models.ApmPayload { - if len(res.DeploymentTemplate.Resources.Apm) == 0 { - return nil - } - return res.DeploymentTemplate.Resources.Apm[0] -} diff --git a/ec/ecresource/deploymentresource/apm_expanders_test.go b/ec/ecresource/deploymentresource/apm_expanders_test.go deleted file mode 100644 index 4ecefabe7..000000000 --- a/ec/ecresource/deploymentresource/apm_expanders_test.go +++ /dev/null @@ -1,264 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" -) - -func Test_expandApmResources(t *testing.T) { - tplPath := "testdata/template-aws-io-optimized-v2.json" - tpl := func() *models.ApmPayload { - return apmResource(parseDeploymentTemplate(t, - tplPath, - )) - } - type args struct { - ess []interface{} - tpl *models.ApmPayload - } - tests := []struct { - name string - args args - want []*models.ApmPayload - err error - }{ - { - name: "returns nil when there's no resources", - }, - { - name: "parses an APM resource with explicit topology", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "2g", - "size_resource": "memory", - "zone_count": 1, - }}, - }, - }, - }, - want: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }}, - }, - }, - }, - }, - { - name: "parses an APM resource with invalid instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "so invalid", - "size": "2g", - 
"size_resource": "memory", - "zone_count": 1, - }}, - }, - }, - }, - err: errors.New(`apm topology: invalid instance_configuration_id: "so invalid" doesn't match any of the deployment template instance configurations`), - }, - { - name: "parses an APM resource with no topology", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - }, - }, - }, - want: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - }, - { - name: "parses an APM resource with a topology element but no instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "size": "2g", - "size_resource": "memory", - }}, - }, - }, - }, - want: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }}, - }, - }, - }, - }, - { - name: "parses an APM resource with explicit topology and some config", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "tertiary-apm", - 
"elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\": \"value\"}", - "user_settings_override_json": "{\"some.setting\": \"value2\"}", - "debug_enabled": true, - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "4g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - want: []*models.ApmPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("tertiary-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{ - UserSettingsYaml: `some.setting: value`, - UserSettingsOverrideYaml: `some.setting: value2`, - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "value2", - }, - SystemSettings: &models.ApmSystemSettings{ - DebugEnabled: ec.Bool(true), - }, - }, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - }}, - }, - }}, - }, - { - name: "tries to parse an apm resource when the template doesn't have an APM instance set.", - args: args{ - tpl: nil, - ess: []interface{}{map[string]interface{}{ - "ref_id": "tertiary-apm", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "4g", - "size_resource": "memory", - "zone_count": 1, - }}, - "config": []interface{}{map[string]interface{}{ - "debug_enabled": true, - }}, - }}, - }, - err: errors.New("apm specified but deployment 
template is not configured for it. Use a different template if you wish to add apm"), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := expandApmResources(tt.args.ess, tt.args.tpl) - if !assert.Equal(t, tt.err, err) { - t.Error(err) - } - - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/ecresource/deploymentresource/apm_flatteners.go b/ec/ecresource/deploymentresource/apm_flatteners.go deleted file mode 100644 index c16c430da..000000000 --- a/ec/ecresource/deploymentresource/apm_flatteners.go +++ /dev/null @@ -1,154 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "bytes" - "encoding/json" - - "github.com/elastic/cloud-sdk-go/pkg/models" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// flattenApmResources flattens apm resources into its flattened structure. 
-func flattenApmResources(in []*models.ApmResourceInfo, name string) []interface{} { - var result = make([]interface{}, 0, len(in)) - for _, res := range in { - var m = make(map[string]interface{}) - if util.IsCurrentApmPlanEmpty(res) || isApmResourceStopped(res) { - continue - } - - if res.RefID != nil && *res.RefID != "" { - m["ref_id"] = *res.RefID - } - - if res.Info.ID != nil && *res.Info.ID != "" { - m["resource_id"] = *res.Info.ID - } - - if res.Region != nil { - m["region"] = *res.Region - } - - plan := res.Info.PlanInfo.Current.Plan - if topology := flattenApmTopology(plan); len(topology) > 0 { - m["topology"] = topology - } - - if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID - } - - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v - } - - if cfg := flattenApmConfig(plan.Apm); len(cfg) > 0 { - m["config"] = cfg - } - - result = append(result, m) - } - - return result -} - -func flattenApmTopology(plan *models.ApmPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) - for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) - if topology.Size == nil || topology.Size.Value == nil || *topology.Size.Value == 0 { - continue - } - - if topology.InstanceConfigurationID != "" { - m["instance_configuration_id"] = topology.InstanceConfigurationID - } - - if topology.Size != nil { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource - } - - m["zone_count"] = topology.ZoneCount - - result = append(result, m) - } - - return result -} - -func flattenApmConfig(cfg *models.ApmConfiguration) []interface{} { - var m = make(map[string]interface{}) - if cfg == nil { - return nil - } - - if cfg.UserSettingsYaml != "" { - m["user_settings_yaml"] = cfg.UserSettingsYaml - } - - if cfg.UserSettingsOverrideYaml != "" { - m["user_settings_override_yaml"] = 
cfg.UserSettingsOverrideYaml - } - - if o := cfg.UserSettingsJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_json"] = string(b) - } - } - - if o := cfg.UserSettingsOverrideJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_override_json"] = string(b) - } - } - - if cfg.DockerImage != "" { - m["docker_image"] = cfg.DockerImage - } - - for k, v := range flattenApmSystemConfig(cfg.SystemSettings) { - m[k] = v - } - - if len(m) == 0 { - return nil - } - - return []interface{}{m} -} - -func flattenApmSystemConfig(cfg *models.ApmSystemSettings) map[string]interface{} { - var m = make(map[string]interface{}) - if cfg == nil { - return nil - } - - if cfg.DebugEnabled != nil { - m["debug_enabled"] = *cfg.DebugEnabled - } - - if len(m) == 0 { - return nil - } - - return m -} diff --git a/ec/ecresource/deploymentresource/create.go b/ec/ecresource/deploymentresource/create.go index 26dfc6377..c63a19c05 100644 --- a/ec/ecresource/deploymentresource/create.go +++ b/ec/ecresource/deploymentresource/create.go @@ -21,63 +21,76 @@ import ( "context" "fmt" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" - "github.com/elastic/cloud-sdk-go/pkg/multierror" + deploymentv "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-log/tflog" ) -// createResource will createResource a new deployment from the specified settings. 
-func createResource(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) - reqID := deploymentapi.RequestID(d.Get("request_id").(string)) +func (r *Resource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + if !r.ready(&resp.Diagnostics) { + return + } - req, err := createResourceToModel(d, client) - if err != nil { - return diag.FromErr(err) + var config deploymentv.DeploymentTF + diags := req.Config.Get(ctx, &config) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + var plan deploymentv.DeploymentTF + diags = req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + request, diags := plan.CreateRequest(ctx, r.client) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return } + requestId := deploymentapi.RequestID(plan.RequestId.Value) + res, err := deploymentapi.Create(deploymentapi.CreateParams{ - API: client, - RequestID: reqID, - Request: req, + API: r.client, + RequestID: requestId, + Request: request, Overrides: &deploymentapi.PayloadOverrides{ - Name: d.Get("name").(string), - Version: d.Get("version").(string), - Region: d.Get("region").(string), + Name: plan.Name.Value, + Version: plan.Version.Value, + Region: plan.Region.Value, }, }) + if err != nil { - merr := multierror.NewPrefixed("failed creating deployment", err) - return diag.FromErr(merr.Append(newCreationError(reqID))) + resp.Diagnostics.AddError("failed creating deployment", err.Error()) + resp.Diagnostics.AddError("failed creating deployment", newCreationError(requestId).Error()) + return } - if err := WaitForPlanCompletion(client, *res.ID); err != nil { - merr := multierror.NewPrefixed("failed tracking create progress", err) - return diag.FromErr(merr.Append(newCreationError(reqID))) + if err := WaitForPlanCompletion(r.client, *res.ID); err != nil { + resp.Diagnostics.AddError("failed 
tracking create progress", newCreationError(requestId).Error()) + return } - d.SetId(*res.ID) + tflog.Trace(ctx, "created a resource") - // Since before the deployment has been read, there's no real state - // persisted, it'd better to handle each of the errors by appending - // it to the `diag.Diagnostics` since it has support for it. - var diags diag.Diagnostics - if err := handleRemoteClusters(d, client); err != nil { - diags = append(diags, diag.FromErr(err)...) - } + resp.Diagnostics.Append(deploymentv.HandleRemoteClusters(ctx, r.client, *res.ID, plan.Elasticsearch)...) - if diag := readResource(ctx, d, meta); diag != nil { - diags = append(diags, diags...) - } + deployment, diags := r.read(ctx, *res.ID, nil, plan, res.Resources) + + resp.Diagnostics.Append(diags...) - if err := parseCredentials(d, res.Resources); err != nil { - diags = append(diags, diag.FromErr(err)...) + if deployment == nil { + resp.Diagnostics.AddError("cannot read just created resource", "") + resp.State.RemoveResource(ctx) + return } - return diags + resp.Diagnostics.Append(resp.State.Set(ctx, deployment)...) 
} func newCreationError(reqID string) error { diff --git a/ec/ecresource/deploymentresource/delete.go b/ec/ecresource/deploymentresource/delete.go index 8ee3c4253..7f3111338 100644 --- a/ec/ecresource/deploymentresource/delete.go +++ b/ec/ecresource/deploymentresource/delete.go @@ -20,65 +20,53 @@ package deploymentresource import ( "context" "errors" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/elastic/cloud-sdk-go/pkg/client/deployments" - "github.com/elastic/cloud-sdk-go/pkg/multierror" + deploymentv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/resource" ) -// Delete shuts down and deletes the remote deployment retrying up to 3 times -// the Shutdown API call in case the plan returns with a failure that contains -// the "Timeout Exceeded" string, which is a fairly common transient error state -// returned from the API. 
-func deleteResource(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - const maxRetries = 3 - var retries int - timeout := d.Timeout(schema.TimeoutDelete) - client := meta.(*api.API) - - return diag.FromErr(resource.RetryContext(ctx, timeout, func() *resource.RetryError { - if _, err := deploymentapi.Shutdown(deploymentapi.ShutdownParams{ - API: client, DeploymentID: d.Id(), - }); err != nil { - if alreadyDestroyed(err) { - d.SetId("") - return nil - } - return resource.NonRetryableError(multierror.NewPrefixed( - "failed shutting down the deployment", err, - )) - } +func (r *Resource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + if !r.ready(&resp.Diagnostics) { + return + } - if err := WaitForPlanCompletion(client, d.Id()); err != nil { - if shouldRetryShutdown(err, retries, maxRetries) { - retries++ - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } + var state deploymentv2.DeploymentTF + + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + //TODO retries - if err := handleTrafficFilterChange(d, client); err != nil { - return resource.NonRetryableError(err) + if _, err := deploymentapi.Shutdown(deploymentapi.ShutdownParams{ + API: r.client, DeploymentID: state.Id.Value, + }); err != nil { + if alreadyDestroyed(err) { + return } + } + + if err := WaitForPlanCompletion(r.client, state.Id.Value); err != nil { + resp.Diagnostics.AddError("deployment deletion error", err.Error()) + return + } - // We don't particularly care if delete succeeds or not. It's better to - // remove it, but it might fail on ESS. For example, when user's aren't - // allowed to delete deployments, or on ECE when the cluster is "still - // being shutdown". Sumarizing, even if the call fails the deployment - // won't be there. 
- _, _ = deploymentapi.Delete(deploymentapi.DeleteParams{ - API: client, DeploymentID: d.Id(), - }) - - d.SetId("") - return nil - })) + // We don't particularly care if delete succeeds or not. It's better to + // remove it, but it might fail on ESS. For example, when user's aren't + // allowed to delete deployments, or on ECE when the cluster is "still + // being shutdown". Sumarizing, even if the call fails the deployment + // won't be there. + _, _ = deploymentapi.Delete(deploymentapi.DeleteParams{ + API: r.client, DeploymentID: state.Id.Value, + }) } func alreadyDestroyed(err error) bool { @@ -86,17 +74,30 @@ func alreadyDestroyed(err error) bool { return errors.As(err, &destroyed) } -func shouldRetryShutdown(err error, retries, maxRetries int) bool { - const timeout = "Timeout exceeded" - needsRetry := retries < maxRetries +func removeRule(ruleID, deploymentID string, client *api.API) error { + res, err := trafficfilterapi.Get(trafficfilterapi.GetParams{ + API: client, ID: ruleID, IncludeAssociations: true, + }) - var isTimeout, isFailDeallocate bool + // If the rule is gone (403 or 404), return nil. if err != nil { - isTimeout = strings.Contains(err.Error(), timeout) - isFailDeallocate = strings.Contains( - err.Error(), "Some instances were not stopped", - ) + if util.TrafficFilterNotFound(err) { + return nil + } + return err } - return (needsRetry && isTimeout) || - (needsRetry && isFailDeallocate) + + // If the rule is found, then delete the association. 
+ for _, assoc := range res.Associations { + if deploymentID == *assoc.ID { + return trafficfilterapi.DeleteAssociation(trafficfilterapi.DeleteAssociationParams{ + API: client, + ID: ruleID, + EntityID: *assoc.ID, + EntityType: *assoc.EntityType, + }) + } + } + + return nil } diff --git a/ec/ecresource/deploymentresource/delete_test.go b/ec/ecresource/deploymentresource/delete_test.go deleted file mode 100644 index b06b1cd0e..000000000 --- a/ec/ecresource/deploymentresource/delete_test.go +++ /dev/null @@ -1,215 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "context" - "errors" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_deleteResource(t *testing.T) { - tc500Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - wantTC500 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - - tc404Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - wantTC404 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - wantTC404.SetId("") - - type args struct { - d *schema.ResourceData - meta interface{} - } - tests := []struct { - name string - args args - want diag.Diagnostics - wantRD *schema.ResourceData - }{ - { - name: "returns an error when it receives a 500", - args: args{ - d: tc500Err, - meta: api.NewMock(mock.NewErrorResponse(500, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: diag.Diagnostics{ - { - Severity: diag.Error, - Summary: "failed shutting down the deployment: 1 error occurred:\n\t* api error: some: message\n\n", - }, - }, - wantRD: wantTC500, - }, - { - name: "returns nil and unsets the state when the error is known", - args: args{ - d: tc404Err, - meta: api.NewMock(mock.NewErrorResponse(404, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: nil, - wantRD: wantTC404, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t 
*testing.T) { - got := deleteResource(context.Background(), tt.args.d, tt.args.meta) - assert.Equal(t, tt.want, got) - var want interface{} - if tt.wantRD != nil { - if s := tt.wantRD.State(); s != nil { - want = s.Attributes - } - } - - var gotState interface{} - if s := tt.args.d.State(); s != nil { - gotState = s.Attributes - } - - assert.Equal(t, want, gotState) - }) - } -} - -func Test_shouldRetryShutdown(t *testing.T) { - type args struct { - err error - retries int - maxRetries int - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "returns false when error doesn't contain timeout string", - args: args{ - err: errors.New("some error"), - retries: 1, - maxRetries: 10, - }, - want: false, - }, - { - name: "returns false when the error is nil", - args: args{ - retries: 1, - maxRetries: 10, - }, - want: false, - }, - { - name: "returns false when error doesn't contain timeout string", - args: args{ - err: errors.New("timeout exceeded"), - retries: 1, - maxRetries: 10, - }, - want: false, - }, - { - name: "returns true when error contains timeout string", - args: args{ - err: errors.New("Timeout exceeded"), - retries: 1, - maxRetries: 10, - }, - want: true, - }, - { - name: "returns true when error contains timeout string", - args: args{ - err: multierror.NewPrefixed("aa", - errors.New("Timeout exceeded"), - ), - retries: 1, - maxRetries: 10, - }, - want: true, - }, - { - name: "returns true when error contains a deallocation failure string", - args: args{ - err: multierror.NewPrefixed("aa", - errors.New(`deployment [8f3c85f97536163ad117a6d37b377120] - [elasticsearch][39dd873845bc43f9b3b21b87fe1a3c99]: caught error: "Plan change failed: Some instances were not stopped`), - ), - retries: 1, - maxRetries: 10, - }, - want: true, - }, - { - name: "returns false when error contains timeout string but exceeds max timeouts", - args: args{ - err: errors.New("Timeout exceeded"), - retries: 10, - maxRetries: 10, - }, - want: false, - }, - { - 
name: "returns false when error contains a deallocation failure string", - args: args{ - err: multierror.NewPrefixed("aa", - errors.New(`deployment [8f3c85f97536163ad117a6d37b377120] - [elasticsearch][39dd873845bc43f9b3b21b87fe1a3c99]: caught error: "Plan change failed: Some instances were not stopped`), - ), - retries: 10, - maxRetries: 10, - }, - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := shouldRetryShutdown(tt.args.err, tt.args.retries, tt.args.maxRetries) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/ecresource/deploymentresource/deployment/v1/deployment.go b/ec/ecresource/deploymentresource/deployment/v1/deployment.go new file mode 100644 index 000000000..68ace68dc --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v1/deployment.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" + + apmv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v1" + elasticsearchv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1" + enterprisesearchv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v1" + integrationsserverv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v1" + kibanav1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v1" + observabilityv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v1" +) + +type DeploymentTF struct { + Id types.String `tfsdk:"id"` + Alias types.String `tfsdk:"alias"` + Version types.String `tfsdk:"version"` + Region types.String `tfsdk:"region"` + DeploymentTemplateId types.String `tfsdk:"deployment_template_id"` + Name types.String `tfsdk:"name"` + RequestId types.String `tfsdk:"request_id"` + ElasticsearchUsername types.String `tfsdk:"elasticsearch_username"` + ElasticsearchPassword types.String `tfsdk:"elasticsearch_password"` + ApmSecretToken types.String `tfsdk:"apm_secret_token"` + TrafficFilter types.Set `tfsdk:"traffic_filter"` + Tags types.Map `tfsdk:"tags"` + Elasticsearch types.List `tfsdk:"elasticsearch"` + Kibana types.List `tfsdk:"kibana"` + Apm types.List `tfsdk:"apm"` + IntegrationsServer types.List `tfsdk:"integrations_server"` + EnterpriseSearch types.List `tfsdk:"enterprise_search"` + Observability types.List `tfsdk:"observability"` +} + +type Deployment struct { + Id string `tfsdk:"id"` + Alias string `tfsdk:"alias"` + Version string `tfsdk:"version"` + Region string `tfsdk:"region"` + DeploymentTemplateId string `tfsdk:"deployment_template_id"` + Name string `tfsdk:"name"` + RequestId string `tfsdk:"request_id"` + ElasticsearchUsername string `tfsdk:"elasticsearch_username"` + 
ElasticsearchPassword string `tfsdk:"elasticsearch_password"` + ApmSecretToken *string `tfsdk:"apm_secret_token"` + TrafficFilter []string `tfsdk:"traffic_filter"` + Tags map[string]string `tfsdk:"tags"` + Elasticsearch elasticsearchv1.Elasticsearches `tfsdk:"elasticsearch"` + Kibana kibanav1.Kibanas `tfsdk:"kibana"` + Apm apmv1.Apms `tfsdk:"apm"` + IntegrationsServer integrationsserverv1.IntegrationsServers `tfsdk:"integrations_server"` + EnterpriseSearch enterprisesearchv1.EnterpriseSearches `tfsdk:"enterprise_search"` + Observability observabilityv1.Observabilities `tfsdk:"observability"` +} diff --git a/ec/ecresource/deploymentresource/deployment/v1/schema.go b/ec/ecresource/deploymentresource/deployment/v1/schema.go new file mode 100644 index 000000000..44357a4d2 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v1/schema.go @@ -0,0 +1,133 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + + apmv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v1" + elasticsearchv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1" + enterprisesearchv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v1" + integrationsserverv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v1" + kibanav1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v1" + observabilityv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v1" +) + +func DeploymentSchema() tfsdk.Schema { + return tfsdk.Schema{ + Version: 1, + // This description is used by the documentation generator and the language server. 
+ MarkdownDescription: "Elastic Cloud Deployment resource", + + Attributes: map[string]tfsdk.Attribute{ + "id": { + Type: types.StringType, + Computed: true, + MarkdownDescription: "Unique identifier of this resource.", + // PlanModifiers: tfsdk.AttributePlanModifiers{ + // resource.UseStateForUnknown(), + // }, + }, + "alias": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "version": { + Type: types.StringType, + Description: "Required Elastic Stack version to use for all of the deployment resources", + Required: true, + }, + "region": { + Type: types.StringType, + Description: `Required ESS region where to create the deployment, for ECE environments "ece-region" must be set`, + Required: true, + }, + "deployment_template_id": { + Type: types.StringType, + Description: "Required Deployment Template identifier to create the deployment from", + Required: true, + }, + "name": { + Type: types.StringType, + Description: "Optional name for the deployment", + Optional: true, + }, + "request_id": { + Type: types.StringType, + Description: "Optional request_id to set on the create operation, only use when previous create attempts return with an error and a request_id is returned as part of the error", + Optional: true, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "elasticsearch_username": { + Type: types.StringType, + Description: "Computed username obtained upon creating the Elasticsearch resource", + Computed: true, + // PlanModifiers: tfsdk.AttributePlanModifiers{ + // resource.UseStateForUnknown(), + // }, + }, + "elasticsearch_password": { + Type: types.StringType, + Description: "Computed password obtained upon creating the Elasticsearch resource", + Computed: true, + Sensitive: true, + // PlanModifiers: tfsdk.AttributePlanModifiers{ + // resource.UseStateForUnknown(), + // }, + }, + 
"apm_secret_token": { + Type: types.StringType, + Computed: true, + Sensitive: true, + // PlanModifiers: tfsdk.AttributePlanModifiers{ + // // resource.UseStateForUnknown(), + // planmodifier.UseStateForNoChange(), + // }, + }, + "traffic_filter": { + Type: types.SetType{ + ElemType: types.StringType, + }, + Optional: true, + Description: "Optional list of traffic filters to apply to this deployment.", + }, + "tags": { + Description: "Optional map of deployment tags", + Type: types.MapType{ + ElemType: types.StringType, + }, + Optional: true, + }, + "elasticsearch": elasticsearchv1.ElasticsearchSchema(), + "kibana": kibanav1.KibanaSchema(), + "apm": apmv1.ApmSchema(), + "integrations_server": integrationsserverv1.IntegrationsServerSchema(), + "enterprise_search": enterprisesearchv1.EnterpriseSearchSchema(), + "observability": observabilityv1.ObservabilitySchema(), + }, + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment.go b/ec/ecresource/deploymentresource/deployment/v2/deployment.go new file mode 100644 index 000000000..d4cdc0853 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment.go @@ -0,0 +1,566 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "fmt" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deptemplateapi" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/esremoteclustersapi" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + integrationsserverv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DeploymentTF struct { + Id types.String `tfsdk:"id"` + Alias types.String `tfsdk:"alias"` + Version types.String `tfsdk:"version"` + Region types.String `tfsdk:"region"` + DeploymentTemplateId types.String `tfsdk:"deployment_template_id"` + Name types.String `tfsdk:"name"` + RequestId types.String `tfsdk:"request_id"` + ElasticsearchUsername types.String `tfsdk:"elasticsearch_username"` + ElasticsearchPassword types.String `tfsdk:"elasticsearch_password"` + ApmSecretToken types.String `tfsdk:"apm_secret_token"` + TrafficFilter types.Set `tfsdk:"traffic_filter"` + Tags types.Map `tfsdk:"tags"` + Elasticsearch types.Object `tfsdk:"elasticsearch"` + Kibana types.Object 
`tfsdk:"kibana"` + Apm types.Object `tfsdk:"apm"` + IntegrationsServer types.Object `tfsdk:"integrations_server"` + EnterpriseSearch types.Object `tfsdk:"enterprise_search"` + Observability types.Object `tfsdk:"observability"` +} + +type Deployment struct { + Id string `tfsdk:"id"` + Alias string `tfsdk:"alias"` + Version string `tfsdk:"version"` + Region string `tfsdk:"region"` + DeploymentTemplateId string `tfsdk:"deployment_template_id"` + Name string `tfsdk:"name"` + RequestId string `tfsdk:"request_id"` + ElasticsearchUsername string `tfsdk:"elasticsearch_username"` + ElasticsearchPassword string `tfsdk:"elasticsearch_password"` + ApmSecretToken *string `tfsdk:"apm_secret_token"` + TrafficFilter []string `tfsdk:"traffic_filter"` + Tags map[string]string `tfsdk:"tags"` + Elasticsearch *elasticsearchv2.Elasticsearch `tfsdk:"elasticsearch"` + Kibana *kibanav2.Kibana `tfsdk:"kibana"` + Apm *apmv2.Apm `tfsdk:"apm"` + IntegrationsServer *integrationsserverv2.IntegrationsServer `tfsdk:"integrations_server"` + EnterpriseSearch *enterprisesearchv2.EnterpriseSearch `tfsdk:"enterprise_search"` + Observability *observabilityv2.Observability `tfsdk:"observability"` +} + +// Nullify Elasticsearch topologies that have zero size and are not specified in plan +func (dep *Deployment) NullifyNotUsedEsTopologies(ctx context.Context, esPlan *elasticsearchv2.ElasticsearchTF) { + if dep.Elasticsearch == nil { + return + } + + if esPlan == nil { + return + } + + dep.Elasticsearch.HotTier = nullifyUnspecifiedZeroSizedTier(esPlan.HotContentTier, dep.Elasticsearch.HotTier) + + dep.Elasticsearch.WarmTier = nullifyUnspecifiedZeroSizedTier(esPlan.WarmTier, dep.Elasticsearch.WarmTier) + + dep.Elasticsearch.ColdTier = nullifyUnspecifiedZeroSizedTier(esPlan.ColdTier, dep.Elasticsearch.ColdTier) + + dep.Elasticsearch.FrozenTier = nullifyUnspecifiedZeroSizedTier(esPlan.FrozenTier, dep.Elasticsearch.FrozenTier) + + dep.Elasticsearch.MlTier = nullifyUnspecifiedZeroSizedTier(esPlan.MlTier, 
dep.Elasticsearch.MlTier) + + dep.Elasticsearch.MasterTier = nullifyUnspecifiedZeroSizedTier(esPlan.MasterTier, dep.Elasticsearch.MasterTier) + + dep.Elasticsearch.CoordinatingTier = nullifyUnspecifiedZeroSizedTier(esPlan.CoordinatingTier, dep.Elasticsearch.CoordinatingTier) +} + +func nullifyUnspecifiedZeroSizedTier(tierPlan types.Object, tier *elasticsearchv2.ElasticsearchTopology) *elasticsearchv2.ElasticsearchTopology { + + if tierPlan.IsNull() && tier != nil { + + size, err := converters.ParseTopologySize(tier.Size, tier.SizeResource) + + // we can ignore returning an error here - it's handled in readers + if err == nil && size != nil && size.Value != nil && *size.Value == 0 { + tier = nil + } + } + + return tier +} + +func ReadDeployment(res *models.DeploymentGetResponse, remotes *models.RemoteResources, deploymentResources []*models.DeploymentResource) (*Deployment, error) { + var dep Deployment + + if res.ID == nil { + return nil, utils.MissingField("ID") + } + dep.Id = *res.ID + + dep.Alias = res.Alias + + if res.Name == nil { + return nil, utils.MissingField("Name") + } + dep.Name = *res.Name + + if res.Metadata != nil { + dep.Tags = converters.TagsToMap(res.Metadata.Tags) + } + + if res.Resources == nil { + return nil, nil + } + + templateID, err := utils.GetDeploymentTemplateID(res.Resources) + if err != nil { + return nil, err + } + + dep.DeploymentTemplateId = templateID + + dep.Region = utils.GetRegion(res.Resources) + + // We're reconciling the version and storing the lowest version of any + // of the deployment resources. This ensures that if an upgrade fails, + // the state version will be lower than the desired version, making + // retries possible. Once more resource types are added, the function + // needs to be modified to check those as well. 
+ version, err := utils.GetLowestVersion(res.Resources) + if err != nil { + // This code path is highly unlikely, but we're bubbling up the + // error in case one of the versions isn't parseable by semver. + return nil, fmt.Errorf("failed reading deployment: %w", err) + } + dep.Version = version + + dep.Elasticsearch, err = elasticsearchv2.ReadElasticsearches(res.Resources.Elasticsearch, remotes) + if err != nil { + return nil, err + } + + if dep.Kibana, err = kibanav2.ReadKibanas(res.Resources.Kibana); err != nil { + return nil, err + } + + if dep.Apm, err = apmv2.ReadApms(res.Resources.Apm); err != nil { + return nil, err + } + + if dep.IntegrationsServer, err = integrationsserverv2.ReadIntegrationsServers(res.Resources.IntegrationsServer); err != nil { + return nil, err + } + + if dep.EnterpriseSearch, err = enterprisesearchv2.ReadEnterpriseSearches(res.Resources.EnterpriseSearch); err != nil { + return nil, err + } + + if dep.TrafficFilter, err = ReadTrafficFilters(res.Settings); err != nil { + return nil, err + } + + if dep.Observability, err = observabilityv2.ReadObservability(res.Settings); err != nil { + return nil, err + } + + if err := dep.parseCredentials(deploymentResources); err != nil { + return nil, err + } + + return &dep, nil +} + +func (dep DeploymentTF) CreateRequest(ctx context.Context, client *api.API) (*models.DeploymentCreateRequest, diag.Diagnostics) { + var result = models.DeploymentCreateRequest{ + Name: dep.Name.Value, + Alias: dep.Alias.Value, + Resources: &models.DeploymentCreateResources{}, + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{}, + } + + dtID := dep.DeploymentTemplateId.Value + version := dep.Version.Value + + var diagsnostics diag.Diagnostics + + template, err := deptemplateapi.Get(deptemplateapi.GetParams{ + API: client, + TemplateID: dtID, + Region: dep.Region.Value, + HideInstanceConfigurations: true, + }) + if err != nil { + diagsnostics.AddError("Deployment template get 
error", err.Error()) + return nil, diagsnostics + } + + useNodeRoles, err := utils.CompatibleWithNodeRoles(version) + if err != nil { + diagsnostics.AddError("Deployment parse error", err.Error()) + return nil, diagsnostics + } + + elasticsearchPayload, diags := elasticsearchv2.ElasticsearchPayload(ctx, dep.Elasticsearch, template, dtID, version, useNodeRoles, false) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if elasticsearchPayload != nil { + result.Resources.Elasticsearch = []*models.ElasticsearchPayload{elasticsearchPayload} + } + + kibanaPayload, diags := kibanav2.KibanaPayload(ctx, dep.Kibana, template) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if kibanaPayload != nil { + result.Resources.Kibana = []*models.KibanaPayload{kibanaPayload} + } + + apmPayload, diags := apmv2.ApmPayload(ctx, dep.Apm, template) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if apmPayload != nil { + result.Resources.Apm = []*models.ApmPayload{apmPayload} + } + + integrationsServerPayload, diags := integrationsserverv2.IntegrationsServerPayload(ctx, dep.IntegrationsServer, template) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if integrationsServerPayload != nil { + result.Resources.IntegrationsServer = []*models.IntegrationsServerPayload{integrationsServerPayload} + } + + enterpriseSearchPayload, diags := enterprisesearchv2.EnterpriseSearchesPayload(ctx, dep.EnterpriseSearch, template) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if enterpriseSearchPayload != nil { + result.Resources.EnterpriseSearch = []*models.EnterpriseSearchPayload{enterpriseSearchPayload} + } + + if diags := TrafficFilterToModel(ctx, dep.TrafficFilter, &result); diags.HasError() { + diagsnostics.Append(diags...) + } + + observabilityPayload, diags := observabilityv2.ObservabilityPayload(ctx, dep.Observability, client) + + if diags.HasError() { + diagsnostics.Append(diags...) 
+ } + + result.Settings.Observability = observabilityPayload + + result.Metadata.Tags, diags = converters.TFmapToTags(ctx, dep.Tags) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + return &result, diagsnostics +} + +func ReadTrafficFilters(in *models.DeploymentSettings) ([]string, error) { + if in == nil || in.TrafficFilterSettings == nil || len(in.TrafficFilterSettings.Rulesets) == 0 { + return nil, nil + } + + var rules []string + + return append(rules, in.TrafficFilterSettings.Rulesets...), nil +} + +// TrafficFilterToModel expands the flattened "traffic_filter" settings to a DeploymentCreateRequest. +func TrafficFilterToModel(ctx context.Context, set types.Set, req *models.DeploymentCreateRequest) diag.Diagnostics { + if len(set.Elems) == 0 || req == nil { + return nil + } + + if req.Settings == nil { + req.Settings = &models.DeploymentCreateSettings{} + } + + if req.Settings.TrafficFilterSettings == nil { + req.Settings.TrafficFilterSettings = &models.TrafficFilterSettings{} + } + + var rulesets []string + if diags := tfsdk.ValueAs(ctx, set, &rulesets); diags.HasError() { + return diags + } + + req.Settings.TrafficFilterSettings.Rulesets = append( + req.Settings.TrafficFilterSettings.Rulesets, + rulesets..., + ) + + return nil +} + +// parseCredentials parses the Create or Update response Resources populating +// credential settings in the Terraform state if the keys are found, currently +// populates the following credentials in plain text: +// * Elasticsearch username and Password +func (dep *Deployment) parseCredentials(resources []*models.DeploymentResource) error { + for _, res := range resources { + + if creds := res.Credentials; creds != nil { + if creds.Username != nil && *creds.Username != "" { + dep.ElasticsearchUsername = *creds.Username + } + + if creds.Password != nil && *creds.Password != "" { + dep.ElasticsearchPassword = *creds.Password + } + } + + if res.SecretToken != "" { + dep.ApmSecretToken = &res.SecretToken + } + } + + 
return nil +} + +func (dep *Deployment) ProcessSelfInObservability() { + + if dep.Observability == nil { + return + } + + if dep.Observability.DeploymentId == nil { + return + } + + if *dep.Observability.DeploymentId == dep.Id { + *dep.Observability.DeploymentId = "self" + } +} + +func (dep *Deployment) SetCredentialsIfEmpty(state *DeploymentTF) { + if state == nil { + return + } + + if dep.ElasticsearchPassword == "" && state.ElasticsearchPassword.Value != "" { + dep.ElasticsearchPassword = state.ElasticsearchPassword.Value + } + + if dep.ElasticsearchUsername == "" && state.ElasticsearchUsername.Value != "" { + dep.ElasticsearchUsername = state.ElasticsearchUsername.Value + } + + if (dep.ApmSecretToken == nil || *dep.ApmSecretToken == "") && state.ApmSecretToken.Value != "" { + dep.ApmSecretToken = &state.ApmSecretToken.Value + } +} + +func (plan DeploymentTF) UpdateRequest(ctx context.Context, client *api.API, state DeploymentTF) (*models.DeploymentUpdateRequest, diag.Diagnostics) { + var result = models.DeploymentUpdateRequest{ + Name: plan.Name.Value, + Alias: plan.Alias.Value, + PruneOrphans: ec.Bool(true), + Resources: &models.DeploymentUpdateResources{}, + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{}, + } + + dtID := plan.DeploymentTemplateId.Value + + var diagsnostics diag.Diagnostics + + template, err := deptemplateapi.Get(deptemplateapi.GetParams{ + API: client, + TemplateID: dtID, + Region: plan.Region.Value, + HideInstanceConfigurations: true, + }) + if err != nil { + diagsnostics.AddError("Deployment template get error", err.Error()) + return nil, diagsnostics + } + + // When the deployment template is changed, we need to skip the missing + // resource topologies to account for a new instance_configuration_id and + // a different default value. 
+ skipEStopologies := plan.DeploymentTemplateId.Value != "" && plan.DeploymentTemplateId.Value != state.DeploymentTemplateId.Value && state.DeploymentTemplateId.Value != "" + // If the deployment_template_id is changed, then we skip updating the + // Elasticsearch topology to account for the case where the + // instance_configuration_id changes, i.e. Hot / Warm, etc. + // This might not be necessary going forward as we move to + // tiered Elasticsearch nodes. + + useNodeRoles, diags := utils.UseNodeRoles(state.Version, plan.Version) + + if diags.HasError() { + return nil, diags + } + + elasticsearchPayload, diags := elasticsearchv2.ElasticsearchPayload(ctx, plan.Elasticsearch, template, dtID, plan.Version.Value, useNodeRoles, skipEStopologies) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if elasticsearchPayload != nil { + // if the restore snapshot operation has been specified, the snapshot restore + // can't be full once the cluster has been created, so the Strategy must be set + // to "partial". + ensurePartialSnapshotStrategy(elasticsearchPayload) + + result.Resources.Elasticsearch = append(result.Resources.Elasticsearch, elasticsearchPayload) + } + + kibanaPayload, diags := kibanav2.KibanaPayload(ctx, plan.Kibana, template) + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if kibanaPayload != nil { + result.Resources.Kibana = append(result.Resources.Kibana, kibanaPayload) + } + + apmPayload, diags := apmv2.ApmPayload(ctx, plan.Apm, template) + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if apmPayload != nil { + result.Resources.Apm = append(result.Resources.Apm, apmPayload) + } + + integrationsServerPayload, diags := integrationsserverv2.IntegrationsServerPayload(ctx, plan.IntegrationsServer, template) + if diags.HasError() { + diagsnostics.Append(diags...) 
+ } + + if integrationsServerPayload != nil { + result.Resources.IntegrationsServer = append(result.Resources.IntegrationsServer, integrationsServerPayload) + } + + enterpriseSearchPayload, diags := enterprisesearchv2.EnterpriseSearchesPayload(ctx, plan.EnterpriseSearch, template) + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if enterpriseSearchPayload != nil { + result.Resources.EnterpriseSearch = append(result.Resources.EnterpriseSearch, enterpriseSearchPayload) + } + + observabilityPayload, diags := observabilityv2.ObservabilityPayload(ctx, plan.Observability, client) + if diags.HasError() { + diagsnostics.Append(diags...) + } + result.Settings.Observability = observabilityPayload + + // In order to stop shipping logs and metrics, an empty Observability + // object must be passed, as opposed to a nil object when creating a + // deployment without observability settings. + if plan.Observability.IsNull() && !state.Observability.IsNull() { + result.Settings.Observability = &models.DeploymentObservabilitySettings{} + } + + result.Metadata.Tags, diags = converters.TFmapToTags(ctx, plan.Tags) + if diags.HasError() { + diagsnostics.Append(diags...) 
+ } + + return &result, diagsnostics +} + +func ensurePartialSnapshotStrategy(es *models.ElasticsearchPayload) { + transient := es.Plan.Transient + if transient == nil || transient.RestoreSnapshot == nil { + return + } + transient.RestoreSnapshot.Strategy = "partial" +} + +// func HandleRemoteClusters(ctx context.Context, client *api.API, newState, oldState DeploymentTF) diag.Diagnostics { +func HandleRemoteClusters(ctx context.Context, client *api.API, deploymentId string, esObj types.Object) diag.Diagnostics { + remoteClusters, refId, diags := ElasticsearchRemoteClustersPayload(ctx, client, deploymentId, esObj) + + if diags.HasError() { + return diags + } + + if err := esremoteclustersapi.Update(esremoteclustersapi.UpdateParams{ + API: client, + DeploymentID: deploymentId, + RefID: refId, + RemoteResources: remoteClusters, + }); err != nil { + diags.AddError("cannot update remote clusters", err.Error()) + return diags + } + + return nil +} + +func ElasticsearchRemoteClustersPayload(ctx context.Context, client *api.API, deploymentId string, esObj types.Object) (*models.RemoteResources, string, diag.Diagnostics) { + var es *elasticsearchv2.ElasticsearchTF + + diags := tfsdk.ValueAs(ctx, esObj, &es) + + if diags.HasError() { + return nil, "", diags + } + + if es == nil { + var diags diag.Diagnostics + diags.AddError("failed create remote clusters payload", "there is no elasticsearch") + return nil, "", diags + } + + remoteRes, diags := elasticsearchv2.ElasticsearchRemoteClustersPayload(ctx, es.RemoteCluster) + if diags.HasError() { + return nil, "", diags + } + + return remoteRes, es.RefId.Value, nil +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go new file mode 100644 index 000000000..2f3f16aea --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go @@ -0,0 +1,3160 @@ +// Licensed to 
Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "bytes" + "context" + "io" + "os" + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/stretchr/testify/assert" +) + +func fileAsResponseBody(t *testing.T, name string) io.ReadCloser { + t.Helper() + f, err := os.Open(name) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + var buf 
= new(bytes.Buffer) + if _, err := io.Copy(buf, f); err != nil { + t.Fatal(err) + } + buf.WriteString("\n") + + return io.NopCloser(buf) +} + +func Test_createRequest(t *testing.T) { + defaultHotTier := elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ) + + defaultElasticsearch := &elasticsearchv2.Elasticsearch{ + HotTier: defaultHotTier, + } + + sampleKibana := &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + ZoneCount: 1, + } + + sampleApm := &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &apmv2.ApmConfig{ + DebugEnabled: ec.Bool(false), + }, + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + ZoneCount: 1, + } + + sampleEnterpriseSearch := &enterprisesearchv2.EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("2g"), + ZoneCount: 1, + NodeTypeAppserver: ec.Bool(true), + NodeTypeConnector: ec.Bool(true), + NodeTypeWorker: ec.Bool(true), + } + + sampleObservability := &observabilityv2.Observability{ + DeploymentId: ec.String(mock.ValidClusterID), + RefId: ec.String("main-elasticsearch"), + Logs: true, + Metrics: true, + } + + sampleDeployment := Deployment{ + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-hot-warm-v2", + Region: "us-east-1", + Version: "7.11.1", + Elasticsearch: 
&elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &elasticsearchv2.ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + WarmTier: elasticsearchv2.CreateTierForTest( + "warm", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: sampleKibana, + Apm: sampleApm, + EnterpriseSearch: sampleEnterpriseSearch, + Observability: sampleObservability, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + } + + sampleElasticsearch := &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &elasticsearchv2.ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + NodeTypeData: ec.String("true"), + 
NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + } + + sampleLegacyDeployment := Deployment{ + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: sampleElasticsearch, + Kibana: sampleKibana, + Apm: sampleApm, + EnterpriseSearch: sampleEnterpriseSearch, + Observability: sampleObservability, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + } + + ioOptimizedTpl := func() io.ReadCloser { + return fileAsResponseBody(t, "../../testdata/template-aws-io-optimized-v2.json") + } + + hotWarmTpl := func() io.ReadCloser { + return fileAsResponseBody(t, "../../testdata/template-aws-hot-warm-v2.json") + } + + ccsTpl := func() io.ReadCloser { + return fileAsResponseBody(t, "../../testdata/template-aws-cross-cluster-search-v2.json") + } + + emptyTpl := func() io.ReadCloser { + return fileAsResponseBody(t, "../../testdata/template-empty.json") + } + + type args struct { + plan Deployment + client *api.API + } + tests := []struct { + name string + args args + want *models.DeploymentCreateRequest + diags diag.Diagnostics + }{ + { + name: "parses the resources", + args: args{ + plan: sampleDeployment, + client: api.NewMock( + mock.New200Response(hotWarmTpl()), + mock.New200Response( + mock.NewStructBody(models.DeploymentGetResponse{ + Healthy: ec.Bool(true), + ID: ec.String(mock.ValidClusterID), + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{{ + ID: ec.String(mock.ValidClusterID), + RefID: ec.String("main-elasticsearch"), + }}, + }, + }), + ), + ), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + Settings: &models.DeploymentCreateSettings{ + TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: 
[]string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + Observability: &models.DeploymentObservabilitySettings{ + Logging: &models.DeploymentLoggingSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + Metrics: &models.DeploymentMetricsSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + }, + }, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.11.1", + UserSettingsYaml: `some.setting: value`, + UserSettingsOverrideYaml: `some.setting: value2`, + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "value2", + }, + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-hot-warm-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + 
Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highstorage.d2", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "warm"}, + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{ + SystemSettings: &models.ApmSystemSettings{ + DebugEnabled: ec.Bool(false), + }, + }, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: 
ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the legacy resources", + args: args{ + plan: sampleLegacyDeployment, + client: api.NewMock( + mock.New200Response(ioOptimizedTpl()), + mock.New200Response( + mock.NewStructBody(models.DeploymentGetResponse{ + Healthy: ec.Bool(true), + ID: ec.String(mock.ValidClusterID), + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{{ + ID: ec.String(mock.ValidClusterID), + RefID: ec.String("main-elasticsearch"), + }}, + }, + }), + ), + ), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + Settings: &models.DeploymentCreateSettings{ + TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + Observability: &models.DeploymentObservabilitySettings{ + Logging: &models.DeploymentLoggingSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + Metrics: &models.DeploymentMetricsSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + }, + }, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: 
[]*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + UserSettingsYaml: `some.setting: value`, + UserSettingsOverrideYaml: `some.setting: value2`, + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "value2", + }, + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + Ml: ec.Bool(false), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: 
&models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{ + SystemSettings: &models.ApmSystemSettings{ + DebugEnabled: ec.Bool(false), + }, + }, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with empty declarations (IO Optimized)", + args: args{ + plan: Deployment{ + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: defaultElasticsearch, + Kibana: &kibanav2.Kibana{}, + Apm: &apmv2.Apm{}, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{}, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + // Ref ids are taken from template, not from defaults values in this test. + // Defaults are processed by TF during config processing. 
+ want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{ + TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + }, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("es-ref-id"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("kibana-ref-id"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: 
[]*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("apm-ref-id"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("enterprise_search-ref-id"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with empty declarations (IO Optimized) with node_roles", + args: args{ + plan: Deployment{ + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.11.0", + Elasticsearch: defaultElasticsearch, + Kibana: &kibanav2.Kibana{}, + Apm: &apmv2.Apm{}, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{}, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{ 
+ TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + }, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + // Ref ids are taken from template, not from defaults values in this test. + // Defaults are processed by TF during config processing. + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("es-ref-id"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.11.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("kibana-ref-id"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: 
[]*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("apm-ref-id"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("enterprise_search-ref-id"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with topology overrides (size)", + args: args{ + + plan: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + RefId: ec.String("main-kibana"), + 
ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Size: ec.String("2g"), + }, + Apm: &apmv2.Apm{ + RefId: ec.String("main-apm"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Size: ec.String("1g"), + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Size: ec.String("4g"), + }, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + Settings: &models.DeploymentCreateSettings{ + TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + }, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + 
Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with topology overrides 
(IC)", + args: args{ + + plan: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: defaultHotTier, + }, + Kibana: &kibanav2.Kibana{ + RefId: ec.String("main-kibana"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + }, + Apm: &apmv2.Apm{ + RefId: ec.String("main-apm"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + InstanceConfigurationId: ec.String("aws.apm.r5d"), + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + }, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + Settings: &models.DeploymentCreateSettings{ + TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + }, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + }, + DeploymentTemplate: 
&models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + Plan: 
&models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with empty declarations (Hot Warm)", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-hot-warm-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: defaultElasticsearch, + Kibana: &kibanav2.Kibana{}, + }, + client: api.NewMock(mock.New200Response(hotWarmTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, hotWarmTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("es-ref-id"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + Curation: nil, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Curation: nil, + Version: "7.9.2", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-hot-warm-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + 
NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d2", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(false), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("kibana-ref-id"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with empty declarations (Hot Warm) with node_roles", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-hot-warm-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: defaultElasticsearch, + 
Kibana: &kibanav2.Kibana{}, + }, + client: api.NewMock(mock.New200Response(hotWarmTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("es-ref-id"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + Curation: nil, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Curation: nil, + Version: "7.12.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-hot-warm-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d2", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Elasticsearch: 
&models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("kibana-ref-id"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with empty declarations (Hot Warm) with node_roles and extensions", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-hot-warm-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: defaultHotTier, + Extension: elasticsearchv2.ElasticsearchExtensions{ + { + Name: "my-plugin", + Type: "plugin", + Url: "repo://12311234", + Version: "7.7.0", + }, + { + Name: "my-second-plugin", + Type: "plugin", + Url: "repo://12311235", + Version: "7.7.0", + }, + { + Name: "my-bundle", + Type: "bundle", + Url: "repo://1231122", + Version: "7.7.0", + }, + { + Name: "my-second-bundle", + Type: "bundle", + Url: "repo://1231123", + Version: "7.7.0", + }, + }, + }, + }, + client: api.NewMock(mock.New200Response(hotWarmTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, 
+ Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.0", + UserBundles: []*models.ElasticsearchUserBundle{ + { + URL: ec.String("repo://1231122"), + Name: ec.String("my-bundle"), + ElasticsearchVersion: ec.String("7.7.0"), + }, + { + URL: ec.String("repo://1231123"), + Name: ec.String("my-second-bundle"), + ElasticsearchVersion: ec.String("7.7.0"), + }, + }, + UserPlugins: []*models.ElasticsearchUserPlugin{ + { + URL: ec.String("repo://12311234"), + Name: ec.String("my-plugin"), + ElasticsearchVersion: ec.String("7.7.0"), + }, + { + URL: ec.String("repo://12311235"), + Name: ec.String("my-second-plugin"), + ElasticsearchVersion: ec.String("7.7.0"), + }, + }, + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-hot-warm-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + 
ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d2", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "deployment with autoscaling enabled", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Autoscale: ec.String("true"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + ColdTier: elasticsearchv2.CreateTierForTest( + "cold", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + WarmTier: elasticsearchv2.CreateTierForTest( + "warm", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: 
[]*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(true), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "cold", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "data_cold", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "cold", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(59392), + Resource: ec.String("memory"), + }, + }, + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 2, + 
InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "deployment with autoscaling enabled and custom policies set", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Autoscale: ec.String("true"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("232g"), + }, + }, + ), + ColdTier: elasticsearchv2.CreateTierForTest( + "cold", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + WarmTier: elasticsearchv2.CreateTierForTest( + "warm", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("116g"), + }, + }, + ), + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: 
&models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(true), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "cold", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "data_cold", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "cold", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(59392), + Resource: ec.String("memory"), + }, + }, + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(237568), + Resource: ec.String("memory"), + 
}, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "deployment with dedicated master and cold tiers", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + ColdTier: elasticsearchv2.CreateTierForTest( + "cold", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + WarmTier: elasticsearchv2.CreateTierForTest( + "warm", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + MasterTier: elasticsearchv2.CreateTierForTest( + "master", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("1g"), + ZoneCount: 3, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: 
&models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "cold", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "data_cold", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "cold", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(59392), + Resource: ec.String("memory"), + }, + }, + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: 
ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "master", + ZoneCount: 3, + InstanceConfigurationID: "aws.master.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + NodeRoles: []string{ + "master", + "remote_cluster_client", + }, + // Elasticsearch: &models.ElasticsearchConfiguration{}, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "deployment with dedicated coordinating and cold tiers", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + ColdTier: elasticsearchv2.CreateTierForTest( + "cold", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + 
}, + ), + WarmTier: elasticsearchv2.CreateTierForTest( + "warm", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + CoordinatingTier: elasticsearchv2.CreateTierForTest( + "coordinating", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "cold", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "data_cold", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "cold", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + 
Value: ec.Int32(59392), + Resource: ec.String("memory"), + }, + }, + { + ID: "coordinating", + ZoneCount: 2, + InstanceConfigurationID: "aws.coordinating.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "ingest", + "remote_cluster_client", + }, + // Elasticsearch: &models.ElasticsearchConfiguration{}, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + }, + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "deployment with dedicated coordinating, master and cold tiers", + args: args{ + plan: Deployment{ + Id: 
mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + ColdTier: elasticsearchv2.CreateTierForTest( + "cold", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + WarmTier: elasticsearchv2.CreateTierForTest( + "warm", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + CoordinatingTier: elasticsearchv2.CreateTierForTest( + "coordinating", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + MasterTier: elasticsearchv2.CreateTierForTest( + "master", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("1g"), + ZoneCount: 3, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + 
AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "cold", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "data_cold", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "cold", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(59392), + Resource: ec.String("memory"), + }, + }, + { + ID: "coordinating", + ZoneCount: 2, + InstanceConfigurationID: "aws.coordinating.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "ingest", + "remote_cluster_client", + }, + // Elasticsearch: &models.ElasticsearchConfiguration{}, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + }, + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), 
+ Resource: ec.String("memory"), + }, + }, + { + ID: "master", + ZoneCount: 3, + InstanceConfigurationID: "aws.master.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + NodeRoles: []string{ + "master", + "remote_cluster_client", + }, + // Elasticsearch: &models.ElasticsearchConfiguration{}, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "deployment with docker_image overrides", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.14.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Config: &elasticsearchv2.ElasticsearchConfig{ + DockerImage: ec.String("docker.elastic.com/elasticsearch/container:7.14.1-hash"), + }, + Autoscale: ec.String("false"), + TrustAccount: elasticsearchv2.ElasticsearchTrustAccounts{ + { + AccountId: ec.String("ANID"), + TrustAll: ec.Bool(true), + }, + }, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + 
), + }, + Kibana: &kibanav2.Kibana{ + RefId: ec.String("main-kibana"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Config: &kibanav2.KibanaConfig{ + DockerImage: ec.String("docker.elastic.com/kibana/container:7.14.1-hash"), + }, + }, + Apm: &apmv2.Apm{ + RefId: ec.String("main-apm"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Config: &apmv2.ApmConfig{ + DockerImage: ec.String("docker.elastic.com/apm/container:7.14.1-hash"), + }, + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Config: &enterprisesearchv2.EnterpriseSearchConfig{ + DockerImage: ec.String("docker.elastic.com/enterprise_search/container:7.14.1-hash"), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + Trust: &models.ElasticsearchClusterTrustSettings{ + Accounts: []*models.AccountTrustRelationship{ + { + AccountID: ec.String("ANID"), + TrustAll: ec.Bool(true), + }, + }, + }, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.14.1", + DockerImage: "docker.elastic.com/elasticsearch/container:7.14.1-hash", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: 
[]*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + Apm: []*models.ApmPayload{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{ + DockerImage: "docker.elastic.com/apm/container:7.14.1-hash", + // SystemSettings: &models.ApmSystemSettings{ + // DebugEnabled: ec.Bool(false), + // }, + }, + ClusterTopology: []*models.ApmTopologyElement{{ + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + ZoneCount: 1, + }}, + }, + RefID: ec.String("main-apm"), + Region: ec.String("us-east-1"), + }}, + Kibana: []*models.KibanaPayload{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{ + DockerImage: "docker.elastic.com/kibana/container:7.14.1-hash", + }, + ClusterTopology: []*models.KibanaClusterTopologyElement{{ + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + ZoneCount: 1, + }}, + }, + RefID: ec.String("main-kibana"), + Region: ec.String("us-east-1"), + }}, + EnterpriseSearch: []*models.EnterpriseSearchPayload{{ + ElasticsearchClusterRefID: 
ec.String("main-elasticsearch"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{ + DockerImage: "docker.elastic.com/enterprise_search/container:7.14.1-hash", + }, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + ZoneCount: 2, + }}, + }, + RefID: ec.String("main-enterprise_search"), + Region: ec.String("us-east-1"), + }}, + }, + }, + }, + + { + name: "deployment with trust settings set", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Autoscale: ec.String("false"), + TrustAccount: elasticsearchv2.ElasticsearchTrustAccounts{ + { + AccountId: ec.String("ANID"), + TrustAll: ec.Bool(true), + }, + { + AccountId: ec.String("anotherID"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{"abc", "hij", "dfg"}, + }, + }, + TrustExternal: elasticsearchv2.ElasticsearchTrustExternals{ + { + RelationshipId: ec.String("external_id"), + TrustAll: ec.Bool(true), + }, + { + RelationshipId: ec.String("another_external_id"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{"abc", "dfg"}, + }, + }, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("232g"), + }, + }, + ), + ColdTier: elasticsearchv2.CreateTierForTest( + "cold", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + Autoscaling: 
&elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + WarmTier: elasticsearchv2.CreateTierForTest( + "warm", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("116g"), + }, + }, + ), + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + Trust: &models.ElasticsearchClusterTrustSettings{ + Accounts: []*models.AccountTrustRelationship{ + { + AccountID: ec.String("ANID"), + TrustAll: ec.Bool(true), + }, + { + AccountID: ec.String("anotherID"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{ + "abc", "hij", "dfg", + }, + }, + }, + External: []*models.ExternalTrustRelationship{ + { + TrustRelationshipID: ec.String("external_id"), + TrustAll: ec.Bool(true), + }, + { + TrustRelationshipID: ec.String("another_external_id"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{ + "abc", "dfg", + }, + }, + }, + }, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "cold", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + 
Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "data_cold", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "cold", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(59392), + Resource: ec.String("memory"), + }, + }, + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(237568), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "parses the resources with empty declarations (Cross Cluster Search)", + args: args{ + plan: Deployment{ + Id: 
mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-cross-cluster-search-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: defaultHotTier, + }, + Kibana: &kibanav2.Kibana{ + RefId: ec.String("main-kibana"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + }, + }, + client: api.NewMock(mock.New200Response(ccsTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ccsTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{}, + Plan: &models.ElasticsearchClusterPlan{ + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.9.2", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-cross-cluster-search-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + ZoneCount: 1, + InstanceConfigurationID: "aws.ccs.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: 
&models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with tags", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Tags: map[string]string{ + "aaa": "bbb", + "owner": "elastic", + "cost-center": "rnd", + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{Tags: []*models.MetadataItem{ + {Key: ec.String("aaa"), Value: ec.String("bbb")}, + {Key: ec.String("cost-center"), Value: ec.String("rnd")}, + {Key: ec.String("owner"), Value: ec.String("elastic")}, + }}, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.10.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, 
+ ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + }, + }, + }, + + { + name: "handles a snapshot_source block, leaving the strategy as is", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + SnapshotSource: &elasticsearchv2.ElasticsearchSnapshotSource{ + SourceElasticsearchClusterId: "8c63b87af9e24ea49b8a4bfe550e5fe9", + }, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), 
+ RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + Transient: &models.TransientElasticsearchPlanConfiguration{ + RestoreSnapshot: &models.RestoreSnapshotConfiguration{ + SourceClusterID: "8c63b87af9e24ea49b8a4bfe550e5fe9", + SnapshotName: ec.String(""), + }, + }, + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.10.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + }, + }, + }, + + // This case we're using an empty deployment_template to ensure that + // resources not present in the template cannot be expanded, receiving + // an error instead. 
+ { + name: "parses the resources with empty explicit declarations (Empty deployment template)", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: defaultElasticsearch, + Kibana: &kibanav2.Kibana{}, + Apm: &apmv2.Apm{}, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{}, + }, + client: api.NewMock(mock.New200Response(emptyTpl())), + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("topology matching error", "invalid id ('hot_content'): valid topology IDs are ") + diags.AddError("kibana payload error", "kibana specified but deployment template is not configured for it. Use a different template if you wish to add kibana") + diags.AddError("apm payload error", "apm specified but deployment template is not configured for it. Use a different template if you wish to add apm") + diags.AddError("enterprise_search payload error", "enterprise_search specified but deployment template is not configured for it. 
Use a different template if you wish to add enterprise_search") + return diags + }(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schema := DeploymentSchema() + + var plan DeploymentTF + diags := tfsdk.ValueFrom(context.Background(), &tt.args.plan, schema.Type(), &plan) + assert.Nil(t, diags) + + got, diags := plan.CreateRequest(context.Background(), tt.args.client) + if tt.diags != nil { + assert.Equal(t, tt.diags, diags) + } else { + assert.Nil(t, diags) + assert.NotNil(t, got) + assert.Equal(t, *tt.want, *got) + } + }) + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_parse_credentials_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_parse_credentials_test.go new file mode 100644 index 000000000..858e6e4d7 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_parse_credentials_test.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/stretchr/testify/assert" +) + +func Test_parseCredentials(t *testing.T) { + type args struct { + dep Deployment + resources []*models.DeploymentResource + } + tests := []struct { + name string + args args + want Deployment + err error + }{ + { + name: "Parses credentials", + args: args{ + dep: Deployment{}, + resources: []*models.DeploymentResource{{ + Credentials: &models.ClusterCredentials{ + Username: ec.String("my-username"), + Password: ec.String("my-password"), + }, + SecretToken: "some-secret-token", + }}, + }, + want: Deployment{ + ElasticsearchUsername: "my-username", + ElasticsearchPassword: "my-password", + ApmSecretToken: ec.String("some-secret-token"), + }, + }, + { + name: "when no credentials are passed, it doesn't overwrite them", + args: args{ + dep: Deployment{ + ElasticsearchUsername: "my-username", + ElasticsearchPassword: "my-password", + ApmSecretToken: ec.String("some-secret-token"), + }, + resources: []*models.DeploymentResource{ + {}, + }, + }, + want: Deployment{ + ElasticsearchUsername: "my-username", + ElasticsearchPassword: "my-password", + ApmSecretToken: ec.String("some-secret-token"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.args.dep.parseCredentials(tt.args.resources) + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.want, tt.args.dep) + } + }) + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go new file mode 100644 index 000000000..bb32bb69c --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go @@ -0,0 +1,1568 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" + "github.com/stretchr/testify/assert" +) + +func Test_readDeployment(t *testing.T) { + type args struct { + res *models.DeploymentGetResponse + remotes models.RemoteResources + } + tests := []struct { + name string + args args + want Deployment + err error + }{ + { + name: "flattens deployment resources", + want: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + 
Version: "7.7.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("us-east-1"), + Config: &elasticsearchv2.ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &apmv2.ApmConfig{ + DebugEnabled: ec.Bool(false), + }, + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: 
ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + NodeTypeAppserver: ec.Bool(true), + NodeTypeConnector: ec.Bool(true), + NodeTypeWorker: ec.Bool(true), + }, + Observability: &observabilityv2.Observability{ + DeploymentId: ec.String(mock.ValidClusterID), + RefId: ec.String("main-elasticsearch"), + Logs: true, + Metrics: true, + }, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + args: args{ + res: &models.DeploymentGetResponse{ + ID: &mock.ValidClusterID, + Alias: "my-deployment", + Name: ec.String("my_deployment_name"), + Settings: &models.DeploymentSettings{ + TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + Observability: &models.DeploymentObservabilitySettings{ + Logging: &models.DeploymentLoggingSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + Metrics: &models.DeploymentMetricsSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + }, + }, + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + { + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Info: &models.ElasticsearchClusterInfo{ + Status: ec.String("started"), + ClusterID: &mock.ValidClusterID, + ClusterName: ec.String("some-name"), + Region: "us-east-1", + ElasticsearchMonitoringInfo: &models.ElasticsearchMonitoringInfo{ + DestinationClusterIds: []string{"some"}, + }, + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + UserSettingsYaml: `some.setting: value`, + UserSettingsOverrideYaml: `some.setting: value2`, + UserSettingsJSON: 
map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "value2", + }, + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + Ml: ec.Bool(false), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }}, + }, + }, + }, + }, + }, + }, + Kibana: []*models.KibanaResourceInfo{ + { + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Info: &models.KibanaClusterInfo{ + Status: ec.String("started"), + ClusterID: &mock.ValidClusterID, + ClusterName: ec.String("some-kibana-name"), + Region: "us-east-1", + PlanInfo: &models.KibanaClusterPlansInfo{ + Current: &models.KibanaClusterPlanInfo{ + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{ + Version: "7.7.0", + }, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmResourceInfo{{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-apm"), + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Info: &models.ApmInfo{ + Status: ec.String("started"), + ID: &mock.ValidClusterID, + Name: 
ec.String("some-apm-name"), + Region: "us-east-1", + PlanInfo: &models.ApmPlansInfo{ + Current: &models.ApmPlanInfo{ + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{ + Version: "7.7.0", + SystemSettings: &models.ApmSystemSettings{ + DebugEnabled: ec.Bool(false), + }, + }, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + }, + }}, + EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ + { + Region: ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Info: &models.EnterpriseSearchInfo{ + Status: ec.String("started"), + ID: &mock.ValidClusterID, + Name: ec.String("some-enterprise_search-name"), + Region: "us-east-1", + PlanInfo: &models.EnterpriseSearchPlansInfo{ + Current: &models.EnterpriseSearchPlanInfo{ + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{ + Version: "7.7.0", + }, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "sets the global version to the lesser version", + args: args{ + res: &models.DeploymentGetResponse{ + ID: &mock.ValidClusterID, + Alias: "my-deployment", + Name: ec.String("my_deployment_name"), + Settings: &models.DeploymentSettings{ + TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + }, + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + { + 
Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Info: &models.ElasticsearchClusterInfo{ + Status: ec.String("started"), + ClusterID: &mock.ValidClusterID, + ClusterName: ec.String("some-name"), + Region: "us-east-1", + ElasticsearchMonitoringInfo: &models.ElasticsearchMonitoringInfo{ + DestinationClusterIds: []string{"some"}, + }, + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + UserSettingsYaml: `some.setting: value`, + UserSettingsOverrideYaml: `some.setting: value2`, + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "value2", + }, + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + Ml: ec.Bool(false), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }}, + }, + }, + }, + }, + }, + }, + Kibana: []*models.KibanaResourceInfo{ + { + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Info: &models.KibanaClusterInfo{ + Status: ec.String("started"), + ClusterID: &mock.ValidClusterID, + ClusterName: ec.String("some-kibana-name"), + Region: "us-east-1", + PlanInfo: 
&models.KibanaClusterPlansInfo{ + Current: &models.KibanaClusterPlanInfo{ + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{ + Version: "7.6.2", + }, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.6.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("us-east-1"), + Config: &elasticsearchv2.ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + }, + + { + name: 
"flattens an azure plan (io-optimized)", + args: args{ + res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-azure-io-optimized.json"), + }, + want: Deployment{ + Id: "123e79d8109c4a0790b0b333110bf715", + Alias: "my-deployment", + Name: "up2d", + DeploymentTemplateId: "azure-io-optimized", + Region: "azure-eastus2", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("1238f19957874af69306787dca662154"), + Region: ec.String("azure-eastus2"), + Autoscale: ec.String("false"), + CloudID: ec.String("up2d:somecloudID"), + HttpEndpoint: ec.String("http://1238f19957874af69306787dca662154.eastus2.azure.elastic-cloud.com:9200"), + HttpsEndpoint: ec.String("https://1238f19957874af69306787dca662154.eastus2.azure.elastic-cloud.com:9243"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("azure.data.highio.l32sv2"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + Config: &elasticsearchv2.ElasticsearchConfig{}, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String("1235cd4a4c7f464bbcfd795f3638b769"), + Region: ec.String("azure-eastus2"), + HttpEndpoint: ec.String("http://1235cd4a4c7f464bbcfd795f3638b769.eastus2.azure.elastic-cloud.com:9200"), + HttpsEndpoint: ec.String("https://1235cd4a4c7f464bbcfd795f3638b769.eastus2.azure.elastic-cloud.com:9243"), + InstanceConfigurationId: ec.String("azure.kibana.e32sv3"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: 
ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String("1235d8c911b74dd6a03c2a7b37fd68ab"), + Region: ec.String("azure-eastus2"), + HttpEndpoint: ec.String("http://1235d8c911b74dd6a03c2a7b37fd68ab.apm.eastus2.azure.elastic-cloud.com:9200"), + HttpsEndpoint: ec.String("https://1235d8c911b74dd6a03c2a7b37fd68ab.apm.eastus2.azure.elastic-cloud.com:443"), + InstanceConfigurationId: ec.String("azure.apm.e32sv3"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + { + name: "flattens an aws plan (io-optimized)", + args: args{res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-aws-io-optimized.json")}, + want: Deployment{ + Id: "123365f2805e46808d40849b1c0b266b", + Alias: "my-deployment", + Name: "up2d", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "aws-eu-central-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("1239f7ee7196439ba2d105319ac5eba7"), + Region: ec.String("aws-eu-central-1"), + Autoscale: ec.String("false"), + CloudID: ec.String("up2d:someCloudID"), + HttpEndpoint: ec.String("http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("8g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: 
ec.String("main-kibana"), + ResourceId: ec.String("123dcfda06254ca789eb287e8b73ff4c"), + Region: ec.String("aws-eu-central-1"), + HttpEndpoint: ec.String("http://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9243"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String("12328579b3bf40c8b58c1a0ed5a4bd8b"), + Region: ec.String("aws-eu-central-1"), + HttpEndpoint: ec.String("http://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:80"), + HttpsEndpoint: ec.String("https://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:443"), + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + { + name: "flattens an aws plan with extensions (io-optimized)", + args: args{ + res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-aws-io-optimized-extension.json"), + }, + want: Deployment{ + Id: "123365f2805e46808d40849b1c0b266b", + Alias: "my-deployment", + Name: "up2d", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "aws-eu-central-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("1239f7ee7196439ba2d105319ac5eba7"), + Region: ec.String("aws-eu-central-1"), + Autoscale: ec.String("false"), + CloudID: ec.String("up2d:someCloudID"), + HttpEndpoint: ec.String("http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + HotTier: 
elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("8g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + Extension: elasticsearchv2.ElasticsearchExtensions{ + { + Name: "custom-bundle", + Version: "7.9.2", + Url: "http://12345", + Type: "bundle", + }, + { + Name: "custom-bundle2", + Version: "7.9.2", + Url: "http://123456", + Type: "bundle", + }, + { + Name: "custom-plugin", + Version: "7.9.2", + Url: "http://12345", + Type: "plugin", + }, + { + Name: "custom-plugin2", + Version: "7.9.2", + Url: "http://123456", + Type: "plugin", + }, + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String("123dcfda06254ca789eb287e8b73ff4c"), + Region: ec.String("aws-eu-central-1"), + HttpEndpoint: ec.String("http://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9243"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String("12328579b3bf40c8b58c1a0ed5a4bd8b"), + Region: ec.String("aws-eu-central-1"), + HttpEndpoint: ec.String("http://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:80"), + HttpsEndpoint: ec.String("https://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:443"), + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + 
SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + { + name: "flattens an aws plan with trusts", + args: args{ + res: &models.DeploymentGetResponse{ + ID: ec.String("123b7b540dfc967a7a649c18e2fce4ed"), + Alias: "OH", + Name: ec.String("up2d"), + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{{ + RefID: ec.String("main-elasticsearch"), + Region: ec.String("aws-eu-central-1"), + Info: &models.ElasticsearchClusterInfo{ + Status: ec.String("running"), + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.13.1", + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Size: &models.TopologySize{ + Value: ec.Int32(4096), + Resource: ec.String("memory"), + }, + }}, + }, + }, + }, + Settings: &models.ElasticsearchClusterSettings{ + Trust: &models.ElasticsearchClusterTrustSettings{ + Accounts: []*models.AccountTrustRelationship{ + { + AccountID: ec.String("ANID"), + TrustAll: ec.Bool(true), + }, + { + AccountID: ec.String("anotherID"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{ + "abc", "dfg", "hij", + }, + }, + }, + External: []*models.ExternalTrustRelationship{ + { + TrustRelationshipID: ec.String("external_id"), + TrustAll: ec.Bool(true), + }, + { + TrustRelationshipID: ec.String("another_external_id"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{ + "abc", "dfg", + }, + }, + }, + }, + }, + }, + }}, + }, + }, + }, + want: Deployment{ + Id: "123b7b540dfc967a7a649c18e2fce4ed", + Alias: "OH", + Name: "up2d", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "aws-eu-central-1", + Version: "7.13.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + 
Region: ec.String("aws-eu-central-1"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + TrustAccount: elasticsearchv2.ElasticsearchTrustAccounts{ + { + AccountId: ec.String("ANID"), + TrustAll: ec.Bool(true), + }, + { + AccountId: ec.String("anotherID"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{"abc", "dfg", "hij"}, + }, + }, + TrustExternal: elasticsearchv2.ElasticsearchTrustExternals{ + { + RelationshipId: ec.String("external_id"), + TrustAll: ec.Bool(true), + }, + { + RelationshipId: ec.String("another_external_id"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{"abc", "dfg"}, + }, + }, + }, + }, + }, + + { + name: "flattens an aws plan with topology.config set", + args: args{ + res: &models.DeploymentGetResponse{ + ID: ec.String("123b7b540dfc967a7a649c18e2fce4ed"), + Alias: "OH", + Name: ec.String("up2d"), + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{{ + RefID: ec.String("main-elasticsearch"), + Region: ec.String("aws-eu-central-1"), + Info: &models.ElasticsearchClusterInfo{ + Status: ec.String("running"), + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.13.1", + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Size: &models.TopologySize{ + Value: ec.Int32(4096), + Resource: ec.String("memory"), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + UserSettingsYaml: "a.setting: true", + }, + }}, + }, + }, + }, + Settings: 
&models.ElasticsearchClusterSettings{}, + }, + }}, + }, + }, + }, + want: Deployment{ + Id: "123b7b540dfc967a7a649c18e2fce4ed", + Alias: "OH", + Name: "up2d", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "aws-eu-central-1", + Version: "7.13.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Region: ec.String("aws-eu-central-1"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + }, + + { + name: "flattens an plan with config.docker_image set", + args: args{ + res: &models.DeploymentGetResponse{ + ID: ec.String("123b7b540dfc967a7a649c18e2fce4ed"), + Alias: "OH", + Name: ec.String("up2d"), + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{{ + RefID: ec.String("main-elasticsearch"), + Region: ec.String("aws-eu-central-1"), + Info: &models.ElasticsearchClusterInfo{ + Status: ec.String("running"), + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.14.1", + DockerImage: "docker.elastic.com/elasticsearch/cloud:7.14.1-hash", + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Size: &models.TopologySize{ + Value: ec.Int32(4096), + Resource: ec.String("memory"), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + UserSettingsYaml: "a.setting: true", + }, + ZoneCount: 1, + }}, + }, + }, + }, + Settings: &models.ElasticsearchClusterSettings{}, + }, + }}, + Apm: []*models.ApmResourceInfo{{ + ElasticsearchClusterRefID: 
ec.String("main-elasticsearch"), + RefID: ec.String("main-apm"), + Region: ec.String("aws-eu-central-1"), + Info: &models.ApmInfo{ + Status: ec.String("running"), + PlanInfo: &models.ApmPlansInfo{Current: &models.ApmPlanInfo{ + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{ + Version: "7.14.1", + DockerImage: "docker.elastic.com/apm/cloud:7.14.1-hash", + SystemSettings: &models.ApmSystemSettings{ + DebugEnabled: ec.Bool(false), + }, + }, + ClusterTopology: []*models.ApmTopologyElement{{ + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + ZoneCount: 1, + }}, + }, + }}, + }, + }}, + Kibana: []*models.KibanaResourceInfo{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + RefID: ec.String("main-kibana"), + Region: ec.String("aws-eu-central-1"), + Info: &models.KibanaClusterInfo{ + Status: ec.String("running"), + PlanInfo: &models.KibanaClusterPlansInfo{Current: &models.KibanaClusterPlanInfo{ + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{ + Version: "7.14.1", + DockerImage: "docker.elastic.com/kibana/cloud:7.14.1-hash", + }, + ClusterTopology: []*models.KibanaClusterTopologyElement{{ + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + ZoneCount: 1, + }}, + }, + }}, + }, + }}, + EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + RefID: ec.String("main-enterprise_search"), + Region: ec.String("aws-eu-central-1"), + Info: &models.EnterpriseSearchInfo{ + Status: ec.String("running"), + PlanInfo: &models.EnterpriseSearchPlansInfo{Current: &models.EnterpriseSearchPlanInfo{ + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{ + Version: "7.14.1", + DockerImage: "docker.elastic.com/enterprise_search/cloud:7.14.1-hash", + }, + ClusterTopology: 
[]*models.EnterpriseSearchTopologyElement{{ + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + ZoneCount: 2, + }}, + }, + }}, + }, + }}, + }, + }, + }, + want: Deployment{ + Id: "123b7b540dfc967a7a649c18e2fce4ed", + Alias: "OH", + Name: "up2d", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "aws-eu-central-1", + Version: "7.14.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Region: ec.String("aws-eu-central-1"), + Config: &elasticsearchv2.ElasticsearchConfig{ + DockerImage: ec.String("docker.elastic.com/elasticsearch/cloud:7.14.1-hash"), + }, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + RefId: ec.String("main-kibana"), + Region: ec.String("aws-eu-central-1"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Config: &kibanav2.KibanaConfig{ + DockerImage: ec.String("docker.elastic.com/kibana/cloud:7.14.1-hash"), + }, + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + RefId: ec.String("main-apm"), + Region: ec.String("aws-eu-central-1"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Config: &apmv2.ApmConfig{ + DockerImage: ec.String("docker.elastic.com/apm/cloud:7.14.1-hash"), + DebugEnabled: ec.Bool(false), + }, + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + EnterpriseSearch: 
&enterprisesearchv2.EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + Region: ec.String("aws-eu-central-1"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Config: &enterprisesearchv2.EnterpriseSearchConfig{ + DockerImage: ec.String("docker.elastic.com/enterprise_search/cloud:7.14.1-hash"), + }, + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + ZoneCount: 2, + NodeTypeAppserver: ec.Bool(true), + NodeTypeConnector: ec.Bool(true), + NodeTypeWorker: ec.Bool(true), + }, + }, + }, + + { + name: "flattens an aws plan (io-optimized) with tags", + args: args{res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-aws-io-optimized-tags.json")}, + want: Deployment{ + Id: "123365f2805e46808d40849b1c0b266b", + Alias: "my-deployment", + Name: "up2d", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "aws-eu-central-1", + Version: "7.9.2", + Tags: map[string]string{ + "aaa": "bbb", + "cost": "rnd", + "owner": "elastic", + }, + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("1239f7ee7196439ba2d105319ac5eba7"), + Region: ec.String("aws-eu-central-1"), + Autoscale: ec.String("false"), + CloudID: ec.String("up2d:someCloudID"), + HttpEndpoint: ec.String("http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("8g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: 
&elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String("123dcfda06254ca789eb287e8b73ff4c"), + Region: ec.String("aws-eu-central-1"), + HttpEndpoint: ec.String("http://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9243"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String("12328579b3bf40c8b58c1a0ed5a4bd8b"), + Region: ec.String("aws-eu-central-1"), + HttpEndpoint: ec.String("http://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:80"), + HttpsEndpoint: ec.String("https://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:443"), + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + { + name: "flattens a gcp plan (io-optimized)", + args: args{res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-gcp-io-optimized.json")}, + want: Deployment{ + Id: "1239e402d6df471ea374bd68e3f91cc5", + Alias: "my-deployment", + Name: "up2d", + DeploymentTemplateId: "gcp-io-optimized", + Region: "gcp-asia-east1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("123695e76d914005bf90b717e668ad4b"), + Region: ec.String("gcp-asia-east1"), + Autoscale: ec.String("false"), + CloudID: ec.String("up2d:someCloudID"), + HttpEndpoint: ec.String("http://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9200"), + HttpsEndpoint: 
ec.String("https://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.data.highio.1"), + Size: ec.String("8g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String("12365046781e4d729a07df64fe67c8c6"), + Region: ec.String("gcp-asia-east1"), + HttpEndpoint: ec.String("http://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9200"), + HttpsEndpoint: ec.String("https://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9243"), + InstanceConfigurationId: ec.String("gcp.kibana.1"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String("12307c6c304949b8a9f3682b80900879"), + Region: ec.String("gcp-asia-east1"), + HttpEndpoint: ec.String("http://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:80"), + HttpsEndpoint: ec.String("https://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:443"), + InstanceConfigurationId: ec.String("gcp.apm.1"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + { + name: "flattens a gcp plan with autoscale set (io-optimized)", + args: args{res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-gcp-io-optimized-autoscale.json")}, + want: Deployment{ + Id: 
"1239e402d6df471ea374bd68e3f91cc5", + Alias: "", + Name: "up2d", + DeploymentTemplateId: "gcp-io-optimized", + Region: "gcp-asia-east1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("123695e76d914005bf90b717e668ad4b"), + Region: ec.String("gcp-asia-east1"), + Autoscale: ec.String("true"), + CloudID: ec.String("up2d:someCloudID"), + HttpEndpoint: ec.String("http://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9200"), + HttpsEndpoint: ec.String("https://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.data.highio.1"), + Size: ec.String("8g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("29g"), + MaxSizeResource: ec.String("memory"), + PolicyOverrideJson: ec.String(`{"proactive_storage":{"forecast_window":"3 h"}}`), + }, + }, + ), + MlTier: elasticsearchv2.CreateTierForTest( + "ml", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.ml.1"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("false"), + NodeTypeIngest: ec.String("false"), + NodeTypeMaster: ec.String("false"), + NodeTypeMl: ec.String("true"), + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("30g"), + MaxSizeResource: ec.String("memory"), + MinSize: ec.String("1g"), + MinSizeResource: ec.String("memory"), + }, + }, + ), + MasterTier: elasticsearchv2.CreateTierForTest( + "master", + elasticsearchv2.ElasticsearchTopology{ + 
InstanceConfigurationId: ec.String("gcp.master.1"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("false"), + NodeTypeIngest: ec.String("false"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 3, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + CoordinatingTier: elasticsearchv2.CreateTierForTest( + "coordinating", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.coordinating.1"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("false"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("false"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String("12365046781e4d729a07df64fe67c8c6"), + Region: ec.String("gcp-asia-east1"), + HttpEndpoint: ec.String("http://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9200"), + HttpsEndpoint: ec.String("https://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9243"), + InstanceConfigurationId: ec.String("gcp.kibana.1"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String("12307c6c304949b8a9f3682b80900879"), + Region: ec.String("gcp-asia-east1"), + HttpEndpoint: ec.String("http://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:80"), + HttpsEndpoint: ec.String("https://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:443"), + InstanceConfigurationId: ec.String("gcp.apm.1"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + 
{ + name: "flattens a gcp plan (hot-warm)", + args: args{res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-gcp-hot-warm.json")}, + want: Deployment{ + Id: "123d148423864552aa57b59929d4bf4d", + Name: "up2d-hot-warm", + DeploymentTemplateId: "gcp-hot-warm", + Region: "gcp-us-central1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("123e837db6ee4391bb74887be35a7a91"), + Region: ec.String("gcp-us-central1"), + Autoscale: ec.String("false"), + CloudID: ec.String("up2d-hot-warm:someCloudID"), + HttpEndpoint: ec.String("http://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.data.highio.1"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + WarmTier: elasticsearchv2.CreateTierForTest( + "warm", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.data.highstorage.1"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("false"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + CoordinatingTier: elasticsearchv2.CreateTierForTest( + "coordinating", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.coordinating.1"), + Size: ec.String("0g"), + SizeResource: 
ec.String("memory"), + NodeTypeData: ec.String("false"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("false"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String("12372cc60d284e7e96b95ad14727c23d"), + Region: ec.String("gcp-us-central1"), + HttpEndpoint: ec.String("http://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9243"), + InstanceConfigurationId: ec.String("gcp.kibana.1"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String("1234b68b0b9347f1b49b1e01b33bf4a4"), + Region: ec.String("gcp-us-central1"), + HttpEndpoint: ec.String("http://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:80"), + HttpsEndpoint: ec.String("https://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:443"), + InstanceConfigurationId: ec.String("gcp.apm.1"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + { + name: "flattens a gcp plan (hot-warm) with node_roles", + args: args{res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-gcp-hot-warm-node_roles.json")}, + want: Deployment{ + Id: "123d148423864552aa57b59929d4bf4d", + Name: "up2d-hot-warm", + DeploymentTemplateId: "gcp-hot-warm", + Region: "gcp-us-central1", + Version: "7.11.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("123e837db6ee4391bb74887be35a7a91"), + Region: ec.String("gcp-us-central1"), + Autoscale: ec.String("false"), + CloudID: 
ec.String("up2d-hot-warm:someCloudID"), + HttpEndpoint: ec.String("http://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.data.highio.1"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 2, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + WarmTier: elasticsearchv2.CreateTierForTest( + "warm", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.data.highstorage.1"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 2, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + MlTier: elasticsearchv2.CreateTierForTest( + "ml", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.ml.1"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + NodeRoles: []string{"ml", "remote_cluster_client"}, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + MasterTier: elasticsearchv2.CreateTierForTest( + "master", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.master.1"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + ZoneCount: 3, + NodeRoles: []string{"master"}, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + CoordinatingTier: elasticsearchv2.CreateTierForTest( + "coordinating", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: 
ec.String("gcp.coordinating.1"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + ZoneCount: 2, + NodeRoles: []string{"ingest", "remote_cluster_client"}, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String("12372cc60d284e7e96b95ad14727c23d"), + Region: ec.String("gcp-us-central1"), + HttpEndpoint: ec.String("http://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9243"), + InstanceConfigurationId: ec.String("gcp.kibana.1"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String("1234b68b0b9347f1b49b1e01b33bf4a4"), + Region: ec.String("gcp-us-central1"), + HttpEndpoint: ec.String("http://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:80"), + HttpsEndpoint: ec.String("https://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:443"), + InstanceConfigurationId: ec.String("gcp.apm.1"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + { + name: "flattens an aws plan (Cross Cluster Search)", + args: args{ + res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-aws-ccs.json"), + remotes: models.RemoteResources{Resources: []*models.RemoteResourceRef{ + { + Alias: ec.String("alias"), + DeploymentID: ec.String("someid"), + ElasticsearchRefID: ec.String("main-elasticsearch"), + SkipUnavailable: ec.Bool(true), + }, + { + DeploymentID: ec.String("some other id"), + ElasticsearchRefID: ec.String("main-elasticsearch"), + }, + }}, + }, + want: Deployment{ + Id: "123987dee8d54505974295e07fc7d13e", + Name: "ccs", + 
DeploymentTemplateId: "aws-cross-cluster-search-v2", + Region: "eu-west-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("1230b3ae633b4f51a432d50971f7f1c1"), + Region: ec.String("eu-west-1"), + Autoscale: ec.String("false"), + CloudID: ec.String("ccs:someCloudID"), + HttpEndpoint: ec.String("http://1230b3ae633b4f51a432d50971f7f1c1.eu-west-1.aws.found.io:9200"), + HttpsEndpoint: ec.String("https://1230b3ae633b4f51a432d50971f7f1c1.eu-west-1.aws.found.io:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + RemoteCluster: elasticsearchv2.ElasticsearchRemoteClusters{ + { + Alias: ec.String("alias"), + DeploymentId: ec.String("someid"), + RefId: ec.String("main-elasticsearch"), + SkipUnavailable: ec.Bool(true), + }, + { + DeploymentId: ec.String("some other id"), + RefId: ec.String("main-elasticsearch"), + }, + }, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.ccs.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String("12317425e9e14491b74ee043db3402eb"), + Region: ec.String("eu-west-1"), + HttpEndpoint: ec.String("http://12317425e9e14491b74ee043db3402eb.eu-west-1.aws.found.io:9200"), + HttpsEndpoint: ec.String("https://12317425e9e14491b74ee043db3402eb.eu-west-1.aws.found.io:9243"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, 
func(t *testing.T) { + dep, err := ReadDeployment(tt.args.res, &tt.args.remotes, nil) + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + assert.NotNil(t, dep) + assert.Equal(t, tt.want, *dep) + } + }) + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go new file mode 100644 index 000000000..d062da24f --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go @@ -0,0 +1,2066 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "io" + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/stretchr/testify/assert" + + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" +) + +func Test_updateResourceToModel(t *testing.T) { + defaultHotTier := elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ) + + defaultElasticsearch := &elasticsearchv2.Elasticsearch{ + HotTier: defaultHotTier, + } + + var ioOptimizedTpl = func() io.ReadCloser { + return fileAsResponseBody(t, "../../testdata/template-aws-io-optimized-v2.json") + } + + hotWarmTpl := func() io.ReadCloser { + return fileAsResponseBody(t, "../../testdata/template-aws-hot-warm-v2.json") + } + + ccsTpl := func() io.ReadCloser { + return fileAsResponseBody(t, "../../testdata/template-aws-cross-cluster-search-v2.json") + } + + emptyTpl := func() io.ReadCloser { + return fileAsResponseBody(t, "../../testdata/template-empty.json") + } + + type args struct { + plan Deployment + state *Deployment + client *api.API + } + tests := []struct { + name string + args args + want 
*models.DeploymentUpdateRequest + diags diag.Diagnostics + }{ + { + name: "parses the resources", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &elasticsearchv2.ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &apmv2.ApmConfig{ + DebugEnabled: ec.Bool(false), + }, + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + ZoneCount: 1, + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + 
RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("2g"), + ZoneCount: 1, + NodeTypeAppserver: ec.Bool(true), + NodeTypeConnector: ec.Bool(true), + NodeTypeWorker: ec.Bool(true), + }, + Observability: &observabilityv2.Observability{ + DeploymentId: ec.String(mock.ValidClusterID), + RefId: ec.String("main-elasticsearch"), + Logs: true, + Metrics: true, + }, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: api.NewMock( + mock.New200Response(ioOptimizedTpl()), + mock.New200Response( + mock.NewStructBody(models.DeploymentGetResponse{ + Healthy: ec.Bool(true), + ID: ec.String(mock.ValidClusterID), + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{{ + ID: ec.String(mock.ValidClusterID), + RefID: ec.String("main-elasticsearch"), + }}, + }, + }), + ), + ), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{ + Observability: &models.DeploymentObservabilitySettings{ + Logging: &models.DeploymentLoggingSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + Metrics: &models.DeploymentMetricsSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + }, + }, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: 
&models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + UserSettingsYaml: `some.setting: value`, + UserSettingsOverrideYaml: `some.setting: value2`, + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "value2", + }, + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + Ml: ec.Bool(false), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: 
ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{ + SystemSettings: &models.ApmSystemSettings{ + DebugEnabled: ec.Bool(false), + }, + }, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with empty declarations", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: defaultElasticsearch, + Kibana: &kibanav2.Kibana{}, + Apm: &apmv2.Apm{}, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{}, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: 
[]*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("es-ref-id"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("kibana-ref-id"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("apm-ref-id"), + Plan: &models.ApmPlan{ + Apm: 
&models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("enterprise_search-ref-id"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with topology overrides", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + RefId: ec.String("main-kibana"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Size: ec.String("2g"), + }, + Apm: &apmv2.Apm{ + RefId: ec.String("main-apm"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Size: ec.String("1g"), + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Size: 
ec.String("4g"), + }, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: 
&models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with empty declarations (Hot Warm)", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-hot-warm-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: defaultElasticsearch, + Kibana: &kibanav2.Kibana{}, + }, + client: api.NewMock(mock.New200Response(hotWarmTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + 
Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, hotWarmTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("es-ref-id"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + Curation: nil, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.9.2", + Curation: nil, + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-hot-warm-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d2", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(false), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "warm"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: 
ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("kibana-ref-id"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "toplogy change from hot / warm to cross cluster search", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-cross-cluster-search-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: defaultHotTier, + }, + Kibana: &kibanav2.Kibana{ + RefId: ec.String("main-kibana"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &elasticsearchv2.ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + 
InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &apmv2.ApmConfig{ + DebugEnabled: ec.Bool(false), + }, + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + ZoneCount: 1, + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("2g"), + ZoneCount: 1, + NodeTypeAppserver: ec.Bool(true), + NodeTypeConnector: ec.Bool(true), + NodeTypeWorker: ec.Bool(true), + }, + Observability: &observabilityv2.Observability{ + DeploymentId: ec.String(mock.ValidClusterID), + RefId: ec.String("main-elasticsearch"), + Logs: true, + Metrics: true, + }, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: api.NewMock(mock.New200Response(ccsTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{ + Observability: &models.DeploymentObservabilitySettings{}, + }, + 
Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ccsTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{}, + Plan: &models.ElasticsearchClusterPlan{ + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.9.2", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-cross-cluster-search-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + ZoneCount: 1, + InstanceConfigurationID: "aws.ccs.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }}, + }, + }, + }, + + // The behavior of this change should be: + // * Resets the Elasticsearch topology: from 16g (due to unsetTopology call on DT change). + // * Keeps the kibana toplogy size to 2g even though the topology element has been removed (saved value persists). 
+ // * Removes all other non present resources + { + name: "topology change with sizes not default from io optimized to cross cluster search", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-cross-cluster-search-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: defaultHotTier, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + CoordinatingTier: elasticsearchv2.CreateTierForTest( + "coordinating", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + Size: ec.String("2g"), + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + Size: ec.String("1g"), + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + Size: ec.String("2g"), + }, + }, + client: api.NewMock(mock.New200Response(ccsTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + 
Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ccsTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{}, + Plan: &models.ElasticsearchClusterPlan{ + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.9.2", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-cross-cluster-search-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + ZoneCount: 1, + InstanceConfigurationID: "aws.ccs.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + // This field's value is reset. + Value: ec.Int32(1024), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }}, + }, + }, + }, + + // The behavior of this change should be: + // * Keeps all topology sizes as they were defined (saved value persists). 
+ { + name: "topology change with sizes not default from explicit value to empty", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: defaultHotTier, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + CoordinatingTier: elasticsearchv2.CreateTierForTest( + "coordinating", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + Size: ec.String("2g"), + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + Size: ec.String("1g"), + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + Size: ec.String("8g"), + }, + 
}, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.9.2", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + Kibana: []*models.KibanaPayload{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: 
[]*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }}, + Apm: []*models.ApmPayload{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }}, + EnterpriseSearch: []*models.EnterpriseSearchPayload{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }}, + }, + }, + }, + + { + name: "does not migrate node_type to node_role on version upgrade that's lower than 7.10.0", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.11.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + 
Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.9.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.11.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: 
ec.Int32(16384), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + Ml: ec.Bool(false), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + }, + }, + }, + + { + name: "migrates node_type to node_role when the existing topology element size is updated", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("32g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: 
&models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.10.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(32768), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + }, + }, + }, + + { + name: "migrates node_type to node_role when the existing topology element size is updated and adds warm tier", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + 
elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + WarmTier: elasticsearchv2.CreateTierForTest( + "warm", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.10.1", + }, + DeploymentTemplate: 
&models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(16384), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "warm"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "enables autoscaling with the default policies", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Autoscale: ec.String("true"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + 
Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + WarmTier: elasticsearchv2.CreateTierForTest( + "warm", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Autoscale: ec.String("true"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(true), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: 
"aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(16384), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "warm"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "parses the resources with tags", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Tags: map[string]string{ + "aaa": "bbb", + "owner": "elastic", + "cost-center": "rnd", + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: 
&elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{Tags: []*models.MetadataItem{ + {Key: ec.String("aaa"), Value: ec.String("bbb")}, + {Key: ec.String("cost-center"), Value: ec.String("rnd")}, + {Key: ec.String("owner"), Value: ec.String("elastic")}, + }}, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.10.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: 
ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + }, + }, + }, + + { + name: "handles a snapshot_source block adding Strategy: partial", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + SnapshotSource: &elasticsearchv2.ElasticsearchSnapshotSource{ + SourceElasticsearchClusterId: "8c63b87af9e24ea49b8a4bfe550e5fe9", + }, + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: 
&models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + Transient: &models.TransientElasticsearchPlanConfiguration{ + RestoreSnapshot: &models.RestoreSnapshotConfiguration{ + SourceClusterID: "8c63b87af9e24ea49b8a4bfe550e5fe9", + SnapshotName: ec.String(""), + Strategy: "partial", + }, + }, + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.10.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + }, + }, + }, + + { + name: "handles empty Elasticsearch empty config block", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: 
&models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.10.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + }, + }, + }, + + { + name: "topology change with invalid resources returns an error", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "empty-deployment-template", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: defaultElasticsearch, + Kibana: &kibanav2.Kibana{}, + Apm: &apmv2.Apm{}, + 
EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{}, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + CoordinatingTier: elasticsearchv2.CreateTierForTest( + "coordinating", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ), + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + Size: ec.String("2g"), + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + Size: ec.String("1g"), + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + Size: ec.String("8g"), + }, + }, + client: api.NewMock(mock.New200Response(emptyTpl())), + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("kibana payload error", "kibana specified but deployment template is not configured for it. Use a different template if you wish to add kibana") + diags.AddError("apm payload error", "apm specified but deployment template is not configured for it. Use a different template if you wish to add apm") + diags.AddError("enterprise_search payload error", "enterprise_search specified but deployment template is not configured for it. 
Use a different template if you wish to add enterprise_search") + return diags + }(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schema := DeploymentSchema() + + var plan DeploymentTF + diags := tfsdk.ValueFrom(context.Background(), &tt.args.plan, schema.Type(), &plan) + assert.Nil(t, diags) + + state := tt.args.state + if state == nil { + state = &tt.args.plan + } + + var stateTF DeploymentTF + + diags = tfsdk.ValueFrom(context.Background(), state, schema.Type(), &stateTF) + assert.Nil(t, diags) + + got, diags := plan.UpdateRequest(context.Background(), tt.args.client, stateTF) + if tt.diags != nil { + assert.Equal(t, tt.diags, diags) + } else { + assert.Nil(t, diags) + assert.NotNil(t, got) + assert.Equal(t, *tt.want, *got) + } + }) + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/elasticsearch_remote_cluster_payload_test.go b/ec/ecresource/deploymentresource/deployment/v2/elasticsearch_remote_cluster_payload_test.go new file mode 100644 index 000000000..2536572db --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/elasticsearch_remote_cluster_payload_test.go @@ -0,0 +1,180 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" +) + +func Test_handleRemoteClusters(t *testing.T) { + type args struct { + plan Deployment + client *api.API + } + tests := []struct { + name string + args args + }{ + { + name: "returns when the resource has no remote clusters", + args: args{ + plan: Deployment{ + Id: "320b7b540dfc967a7a649c18e2fce4ed", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + }, + }, + client: api.NewMock(mock.New202ResponseAssertion( + &mock.RequestAssertion{ + Header: api.DefaultWriteMockHeaders, + Host: api.DefaultMockHost, + Path: `/api/v1/deployments/320b7b540dfc967a7a649c18e2fce4ed/elasticsearch/main-elasticsearch/remote-clusters`, + Method: "PUT", + Body: mock.NewStringBody(`{"resources":[]}` + "\n"), + }, + mock.NewStringBody("{}"), + )), + }, + }, + { + name: "read the remote clusters", + args: args{ + client: api.NewMock(mock.New202ResponseAssertion( + &mock.RequestAssertion{ + Header: api.DefaultWriteMockHeaders, + Host: api.DefaultMockHost, + Path: `/api/v1/deployments/320b7b540dfc967a7a649c18e2fce4ed/elasticsearch/main-elasticsearch/remote-clusters`, + Method: "PUT", + Body: mock.NewStringBody(`{"resources":[{"alias":"alias","deployment_id":"someid","elasticsearch_ref_id":"main-elasticsearch","skip_unavailable":true},{"alias":"alias","deployment_id":"some other id","elasticsearch_ref_id":"main-elasticsearch","skip_unavailable":false}]}` + "\n"), + }, + mock.NewStringBody("{}"), + )), + plan: Deployment{ + Name: 
"my_deployment_name", + Id: "320b7b540dfc967a7a649c18e2fce4ed", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + RemoteCluster: elasticsearchv2.ElasticsearchRemoteClusters{ + { + Alias: ec.String("alias"), + DeploymentId: ec.String("someid"), + RefId: ec.String("main-elasticsearch"), + SkipUnavailable: ec.Bool(true), + }, + { + Alias: ec.String("alias"), + DeploymentId: ec.String("some other id"), + RefId: ec.String("main-elasticsearch"), + SkipUnavailable: ec.Bool(false), + }, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schema := DeploymentSchema() + + var planTF DeploymentTF + + diags := tfsdk.ValueFrom(context.Background(), tt.args.plan, schema.Type(), &planTF) + assert.Nil(t, diags) + + diags = HandleRemoteClusters(context.Background(), tt.args.client, planTF.Id.Value, planTF.Elasticsearch) + assert.Nil(t, diags) + }) + } +} + +func Test_writeRemoteClusters(t *testing.T) { + type args struct { + remoteClusters elasticsearchv2.ElasticsearchRemoteClusters + } + tests := []struct { + name string + args args + want *models.RemoteResources + }{ + { + name: "wants no error or empty res", + args: args{ + remoteClusters: elasticsearchv2.ElasticsearchRemoteClusters{}, + }, + want: &models.RemoteResources{Resources: []*models.RemoteResourceRef{}}, + }, + { + name: "expands remotes", + args: args{ + remoteClusters: elasticsearchv2.ElasticsearchRemoteClusters{ + { + Alias: ec.String("alias"), + DeploymentId: ec.String("someid"), + RefId: ec.String("main-elasticsearch"), + SkipUnavailable: ec.Bool(true), + }, + { + Alias: ec.String("alias"), + DeploymentId: ec.String("some other id"), + RefId: ec.String("main-elasticsearch"), + }, + }, + }, + want: &models.RemoteResources{Resources: []*models.RemoteResourceRef{ + { + Alias: ec.String("alias"), + DeploymentID: ec.String("someid"), + 
ElasticsearchRefID: ec.String("main-elasticsearch"), + SkipUnavailable: ec.Bool(true), + }, + { + Alias: ec.String("alias"), + DeploymentID: ec.String("some other id"), + ElasticsearchRefID: ec.String("main-elasticsearch"), + }, + }}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var remoteClustersTF types.Set + diags := tfsdk.ValueFrom(context.Background(), tt.args.remoteClusters, elasticsearchv2.ElasticsearchRemoteClusterSchema().FrameworkType(), &remoteClustersTF) + assert.Nil(t, diags) + + got, diags := elasticsearchv2.ElasticsearchRemoteClustersPayload(context.Background(), remoteClustersTF) + assert.Nil(t, diags) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/partial_stapshot_strategy_test.go b/ec/ecresource/deploymentresource/deployment/v2/partial_stapshot_strategy_test.go new file mode 100644 index 000000000..3b4f63386 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/partial_stapshot_strategy_test.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/stretchr/testify/assert" +) + +func Test_ensurePartialSnapshotStrategy(t *testing.T) { + type args struct { + es *models.ElasticsearchPayload + } + tests := []struct { + name string + args args + want *models.ElasticsearchPayload + }{ + { + name: "ignores resources with no transient block", + args: args{es: &models.ElasticsearchPayload{ + Plan: &models.ElasticsearchClusterPlan{}, + }}, + want: &models.ElasticsearchPayload{ + Plan: &models.ElasticsearchClusterPlan{}, + }, + }, + { + name: "ignores resources with no transient.snapshot block", + args: args{es: &models.ElasticsearchPayload{ + Plan: &models.ElasticsearchClusterPlan{ + Transient: &models.TransientElasticsearchPlanConfiguration{}, + }, + }}, + want: &models.ElasticsearchPayload{ + Plan: &models.ElasticsearchClusterPlan{ + Transient: &models.TransientElasticsearchPlanConfiguration{}, + }, + }, + }, + { + name: "Sets strategy to partial", + args: args{es: &models.ElasticsearchPayload{ + Plan: &models.ElasticsearchClusterPlan{ + Transient: &models.TransientElasticsearchPlanConfiguration{ + RestoreSnapshot: &models.RestoreSnapshotConfiguration{ + SourceClusterID: "some", + }, + }, + }, + }}, + want: &models.ElasticsearchPayload{ + Plan: &models.ElasticsearchClusterPlan{ + Transient: &models.TransientElasticsearchPlanConfiguration{ + RestoreSnapshot: &models.RestoreSnapshotConfiguration{ + SourceClusterID: "some", + Strategy: "partial", + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ensurePartialSnapshotStrategy(tt.args.es) + assert.Equal(t, tt.want, tt.args.es) + }) + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/schema.go b/ec/ecresource/deploymentresource/deployment/v2/schema.go new file mode 100644 index 000000000..bed20c35d --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/schema.go @@ -0,0 +1,132 @@ +// Licensed to 
Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + integrationsserverv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" +) + +func DeploymentSchema() tfsdk.Schema { + return tfsdk.Schema{ + Version: 2, + // This description is used by the documentation generator and the language server. 
+ MarkdownDescription: "Elastic Cloud Deployment resource", + + Attributes: map[string]tfsdk.Attribute{ + "id": { + Type: types.StringType, + Computed: true, + MarkdownDescription: "Unique identifier of this resource.", + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "alias": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "version": { + Type: types.StringType, + Description: "Required Elastic Stack version to use for all of the deployment resources", + Required: true, + }, + "region": { + Type: types.StringType, + Description: `Required ESS region where to create the deployment, for ECE environments "ece-region" must be set`, + Required: true, + }, + "deployment_template_id": { + Type: types.StringType, + Description: "Required Deployment Template identifier to create the deployment from", + Required: true, + }, + "name": { + Type: types.StringType, + Description: "Optional name for the deployment", + Optional: true, + }, + "request_id": { + Type: types.StringType, + Description: "Optional request_id to set on the create operation, only use when previous create attempts return with an error and a request_id is returned as part of the error", + Optional: true, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "elasticsearch_username": { + Type: types.StringType, + Description: "Computed username obtained upon creating the Elasticsearch resource", + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "elasticsearch_password": { + Type: types.StringType, + Description: "Computed password obtained upon creating the Elasticsearch resource", + Computed: true, + Sensitive: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "apm_secret_token": { + Type: 
types.StringType, + Computed: true, + Sensitive: true, + // PlanModifiers: tfsdk.AttributePlanModifiers{ + // ApmSecretTokenPlanModifier(), + // }, + }, + "traffic_filter": { + Type: types.SetType{ + ElemType: types.StringType, + }, + Optional: true, + Description: "Optional list of traffic filters to apply to this deployment.", + }, + "tags": { + Description: "Optional map of deployment tags", + Type: types.MapType{ + ElemType: types.StringType, + }, + Optional: true, + }, + "elasticsearch": elasticsearchv2.ElasticsearchSchema(), + "kibana": kibanav2.KibanaSchema(), + "apm": apmv2.ApmSchema(), + "integrations_server": integrationsserverv2.IntegrationsServerSchema(), + "enterprise_search": enterprisesearchv2.EnterpriseSearchSchema(), + "observability": observabilityv2.ObservabilitySchema(), + }, + } +} diff --git a/ec/ecresource/deploymentresource/traffic_filter_test.go b/ec/ecresource/deploymentresource/deployment/v2/traffic_filter_test.go similarity index 77% rename from ec/ecresource/deploymentresource/traffic_filter_test.go rename to ec/ecresource/deploymentresource/deployment/v2/traffic_filter_test.go index 506cc762e..2bf4b9564 100644 --- a/ec/ecresource/deploymentresource/traffic_filter_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/traffic_filter_test.go @@ -15,12 +15,14 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package v2 import ( + "context" "testing" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" "github.com/elastic/cloud-sdk-go/pkg/models" @@ -33,7 +35,7 @@ func TestParseTrafficFiltering(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []string }{ { name: "parses no rules when they're empty", @@ -67,7 +69,7 @@ func TestParseTrafficFiltering(t *testing.T) { }, }, }}, - want: []interface{}{ + want: []string{ "one-id-of-a-rule", "another-id-of-another-rule", }, @@ -75,19 +77,17 @@ func TestParseTrafficFiltering(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var gotSlice []interface{} - if got := flattenTrafficFiltering(tt.args.settings); got != nil { - gotSlice = got.List() - } - assert.Equal(t, tt.want, gotSlice) + got, err := ReadTrafficFilters(tt.args.settings) + assert.Nil(t, err) + assert.Equal(t, tt.want, got) }) } } -func Test_expandTrafficFilterCreate(t *testing.T) { +func Test_trafficFilterToModel(t *testing.T) { type args struct { - v *schema.Set - req *models.DeploymentCreateRequest + filters []string + req *models.DeploymentCreateRequest } tests := []struct { name string @@ -101,8 +101,8 @@ func Test_expandTrafficFilterCreate(t *testing.T) { { name: "parses all the traffic filtering rules", args: args{ - v: schema.NewSet(schema.HashString, []interface{}{"0.0.0.0/0", "192.168.1.0/24"}), - req: &models.DeploymentCreateRequest{}, + filters: []string{"0.0.0.0/0", "192.168.1.0/24"}, + req: &models.DeploymentCreateRequest{}, }, want: &models.DeploymentCreateRequest{Settings: &models.DeploymentCreateSettings{ TrafficFilterSettings: &models.TrafficFilterSettings{Rulesets: []string{ @@ -113,8 +113,8 @@ func Test_expandTrafficFilterCreate(t *testing.T) { { name: "parses all the traffic filtering rules", 
args: args{ - v: schema.NewSet(schema.HashString, []interface{}{"0.0.0.0/0", "192.168.1.0/24"}), - req: &models.DeploymentCreateRequest{Settings: &models.DeploymentCreateSettings{}}, + filters: []string{"0.0.0.0/0", "192.168.1.0/24"}, + req: &models.DeploymentCreateRequest{Settings: &models.DeploymentCreateSettings{}}, }, want: &models.DeploymentCreateRequest{Settings: &models.DeploymentCreateSettings{ TrafficFilterSettings: &models.TrafficFilterSettings{Rulesets: []string{ @@ -125,7 +125,7 @@ func Test_expandTrafficFilterCreate(t *testing.T) { { name: "parses all the traffic filtering rules", args: args{ - v: schema.NewSet(schema.HashString, []interface{}{"0.0.0.0/0", "192.168.1.0/24"}), + filters: []string{"0.0.0.0/0", "192.168.1.0/24"}, req: &models.DeploymentCreateRequest{Settings: &models.DeploymentCreateSettings{ TrafficFilterSettings: &models.TrafficFilterSettings{ Rulesets: []string{"192.168.0.0/24"}, @@ -141,15 +141,20 @@ func Test_expandTrafficFilterCreate(t *testing.T) { { name: "parses no traffic filtering rules", args: args{ - v: schema.NewSet(schema.HashString, nil), - req: &models.DeploymentCreateRequest{}, + filters: nil, + req: &models.DeploymentCreateRequest{}, }, want: &models.DeploymentCreateRequest{}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - expandTrafficFilterCreate(tt.args.v, tt.args.req) + var filters types.Set + diags := tfsdk.ValueFrom(context.Background(), tt.args.filters, types.SetType{ElemType: types.StringType}, &filters) + assert.Nil(t, diags) + + diags = TrafficFilterToModel(context.Background(), filters, tt.args.req) + assert.Nil(t, diags) assert.Equal(t, tt.want, tt.args.req) }) } diff --git a/ec/ecresource/deploymentresource/deployment_not_found_test.go b/ec/ecresource/deploymentresource/deployment_not_found_test.go new file mode 100644 index 000000000..03e493661 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment_not_found_test.go @@ -0,0 +1,75 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package deploymentresource + +import ( + "testing" + + "github.com/go-openapi/runtime" + + "github.com/elastic/cloud-sdk-go/pkg/api/apierror" + "github.com/elastic/cloud-sdk-go/pkg/client/deployments" +) + +func Test_deploymentNotFound(t *testing.T) { + type args struct { + err error + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "When the error is empty, it returns false", + }, + { + name: "When the error is something else (500), it returns false", + args: args{ + err: &apierror.Error{Err: &runtime.APIError{Code: 500}}, + }, + }, + { + name: "When the error is something else (401), it returns false", + args: args{ + err: &apierror.Error{Err: &deployments.GetDeploymentUnauthorized{}}, + }, + }, + { + name: "When the deployment is not found, it returns true", + args: args{ + err: &apierror.Error{Err: &deployments.GetDeploymentNotFound{}}, + }, + want: true, + }, + { + name: "When the deployment is not authorized it returns true, to account for the DR case (ESS)", + args: args{ + err: &apierror.Error{Err: &runtime.APIError{Code: 403}}, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := deploymentNotFound(tt.args.err); got 
!= tt.want { + t.Errorf("deploymentNotFound() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/ec/ecresource/deploymentresource/deployment_test.go b/ec/ecresource/deploymentresource/deployment_test.go new file mode 100644 index 000000000..64e141b90 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment_test.go @@ -0,0 +1,197 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package deploymentresource_test + +import ( + "encoding/json" + "fmt" + "io" + "net/url" + "os" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + r "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + + provider "github.com/elastic/terraform-provider-ec/ec" +) + +func Test_createDeploymentWithEmptyFields(t *testing.T) { + requestId := "cuchxqanal0g8rmx9ljog7qrrpd68iitulaz2mrch1vuuihetgo5ge3f6555vn4s" + + deploymentWithDefaultsIoOptimized := fmt.Sprintf(` + resource "ec_deployment" "empty-declarations-IO-Optimized" { + request_id = "%s" + name = "my_deployment_name" + deployment_template_id = "aws-io-optimized-v2" + region = "us-east-1" + version = "8.4.3" + + elasticsearch = { + config = {} + hot = { + size = "8g" + autoscaling = {} + } + } + }`, + requestId, + ) + + createDeploymentResponseJson := []byte(` + { + "alias": "my-deployment-name", + "created": true, + "id": "accd2e61fa835a5a32bb6b2938ce91f3", + "resources": [ + { + "kind": "elasticsearch", + "cloud_id": "my_deployment_name:cloud_id", + "region": "us-east-1", + "ref_id": "main-elasticsearch", + "credentials": { + "username": "elastic", + "password": "password" + }, + "id": "resource_id" + } + ], + "name": "my_deployment_name" + } + `) + + templateFileName := "testdata/aws-io-optimized-v2.json" + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + getTemplate(t, templateFileName), + createDeployment(t, readFile(t, "testdata/aws-io-optimized-v2-empty-config-create-expected-payload.json"), createDeploymentResponseJson, requestId), + mock.New200Response(readTestData(t, "testdata/aws-io-optimized-v2-empty-config-expected-deployment1.json")), + mock.New200Response(readTestData(t, 
"testdata/aws-io-optimized-v2-empty-config-expected-deployment2.json")), + mock.New200Response(readTestData(t, "testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json")), + mock.New200Response(readTestData(t, "testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json")), + mock.New200Response(readTestData(t, "testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json")), + mock.New202Response(io.NopCloser(strings.NewReader(""))), + mock.New200Response(readTestData(t, "testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json")), + readRemoteClusters(t), + mock.New200Response(readTestData(t, "testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json")), + readRemoteClusters(t), + shutdownDeployment(t), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: deploymentWithDefaultsIoOptimized, + }, + }, + }) +} + +func getTemplate(t *testing.T, filename string) mock.Response { + return mock.New200ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultReadMockHeaders, + Method: "GET", + Path: "/api/v1/deployments/templates/aws-io-optimized-v2", + Query: url.Values{"region": {"us-east-1"}, "show_instance_configurations": {"false"}}, + }, + readTestData(t, filename), + ) +} + +func readFile(t *testing.T, fileName string) []byte { + t.Helper() + res, err := os.ReadFile(fileName) + if err != nil { + t.Fatalf(err.Error()) + } + return res +} + +func readTestData(t *testing.T, filename string) io.ReadCloser { + t.Helper() + f, err := os.Open(filename) + if err != nil { + t.Fatalf(err.Error()) + } + return f +} + +func createDeployment(t *testing.T, expectedRequestJson, responseJson []byte, requestId string) mock.Response { + t.Helper() + var expectedRequest *models.DeploymentCreateRequest + err := json.Unmarshal(expectedRequestJson, &expectedRequest) + if err != nil { + t.Fatalf(err.Error()) + } + + var response *models.DeploymentCreateResponse + err = 
json.Unmarshal(responseJson, &response) + if err != nil { + t.Fatalf(err.Error()) + } + + return mock.New201ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultWriteMockHeaders, + Method: "POST", + Path: "/api/v1/deployments", + Query: url.Values{"request_id": {requestId}}, + Body: mock.NewStructBody(expectedRequest), + }, + mock.NewStructBody(response), + ) +} + +func shutdownDeployment(t *testing.T) mock.Response { + t.Helper() + + return mock.New201ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultWriteMockHeaders, + Method: "POST", + Path: "/api/v1/deployments/accd2e61fa835a5a32bb6b2938ce91f3/_shutdown", + Query: url.Values{"skip_snapshot": {"false"}}, + Body: io.NopCloser(strings.NewReader("")), + }, + io.NopCloser(strings.NewReader("")), + ) +} + +func readRemoteClusters(t *testing.T) mock.Response { + + return mock.New200StructResponse( + &models.RemoteResources{Resources: []*models.RemoteResourceRef{}}, + ) +} + +func protoV6ProviderFactoriesWithMockClient(client *api.API) map[string]func() (tfprotov6.ProviderServer, error) { + return map[string]func() (tfprotov6.ProviderServer, error){ + "ec": providerserver.NewProtocol6WithError(provider.ProviderWithClient(client, "unit-tests")), + } +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch.go new file mode 100644 index 000000000..58c04fc5d --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchTF struct { + Autoscale types.String `tfsdk:"autoscale"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + CloudID types.String `tfsdk:"cloud_id"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + Topology types.List `tfsdk:"topology"` + Config types.List `tfsdk:"config"` + RemoteCluster types.Set `tfsdk:"remote_cluster"` + SnapshotSource types.List `tfsdk:"snapshot_source"` + Extension types.Set `tfsdk:"extension"` + TrustAccount types.Set `tfsdk:"trust_account"` + TrustExternal types.Set `tfsdk:"trust_external"` + Strategy types.List `tfsdk:"strategy"` +} + +type Elasticsearch struct { + Autoscale *string `tfsdk:"autoscale"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + CloudID *string `tfsdk:"cloud_id"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + Topology ElasticsearchTopologies `tfsdk:"topology"` + Config ElasticsearchConfigs `tfsdk:"config"` + RemoteCluster ElasticsearchRemoteClusters `tfsdk:"remote_cluster"` + SnapshotSource ElasticsearchSnapshotSources `tfsdk:"snapshot_source"` + Extension ElasticsearchExtensions `tfsdk:"extension"` + TrustAccount ElasticsearchTrustAccounts `tfsdk:"trust_account"` + TrustExternal ElasticsearchTrustExternals `tfsdk:"trust_external"` + Strategy 
ElasticsearchStrategies `tfsdk:"strategy"` +} + +type Elasticsearches []Elasticsearch diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_config.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_config.go new file mode 100644 index 000000000..d8878b9a0 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_config.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchConfigTF struct { + Plugins types.Set `tfsdk:"plugins"` + DockerImage types.String `tfsdk:"docker_image"` + UserSettingsJson types.String `tfsdk:"user_settings_json"` + UserSettingsOverrideJson types.String `tfsdk:"user_settings_override_json"` + UserSettingsYaml types.String `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml types.String `tfsdk:"user_settings_override_yaml"` +} + +type ElasticsearchConfig struct { + Plugins []string `tfsdk:"plugins"` + DockerImage *string `tfsdk:"docker_image"` + UserSettingsJson *string `tfsdk:"user_settings_json"` + UserSettingsOverrideJson *string `tfsdk:"user_settings_override_json"` + UserSettingsYaml *string `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml *string `tfsdk:"user_settings_override_yaml"` +} + +type ElasticsearchConfigs []ElasticsearchConfig diff --git a/ec/internal/flatteners/flatten_endpoint.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_extension.go similarity index 54% rename from ec/internal/flatteners/flatten_endpoint.go rename to ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_extension.go index 3af63a361..800c9cf60 100644 --- a/ec/internal/flatteners/flatten_endpoint.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_extension.go @@ -15,28 +15,26 @@ // specific language governing permissions and limitations // under the License. 
-package flatteners +package v1 import ( - "fmt" - - "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/types" ) -// FlattenClusterEndpoint receives a ClusterMetadataInfo, parses the http and -// https endpoints and returns a map with two keys: `http_endpoint` and -// `https_endpoint` -func FlattenEndpoints(metadata *models.ClusterMetadataInfo) (httpEndpoint string, httpsEndpoint string) { - if metadata == nil || metadata.Endpoint == "" || metadata.Ports == nil { - return - } +type ElasticsearchExtensionTF struct { + Name types.String `tfsdk:"name"` + Type types.String `tfsdk:"type"` + Version types.String `tfsdk:"version"` + Url types.String `tfsdk:"url"` +} - if metadata.Ports.HTTP != nil { - httpEndpoint = fmt.Sprintf("http://%s:%d", metadata.Endpoint, *metadata.Ports.HTTP) - } +type ElasticsearchExtensionsTF types.Set - if metadata.Ports.HTTPS != nil { - httpsEndpoint = fmt.Sprintf("https://%s:%d", metadata.Endpoint, *metadata.Ports.HTTPS) - } - return +type ElasticsearchExtension struct { + Name string `tfsdk:"name"` + Type string `tfsdk:"type"` + Version string `tfsdk:"version"` + Url string `tfsdk:"url"` } + +type ElasticsearchExtensions []ElasticsearchExtension diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_remote_cluster.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_remote_cluster.go new file mode 100644 index 000000000..c2da22ee3 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_remote_cluster.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchRemoteClusterTF struct { + DeploymentId types.String `tfsdk:"deployment_id"` + Alias types.String `tfsdk:"alias"` + RefId types.String `tfsdk:"ref_id"` + SkipUnavailable types.Bool `tfsdk:"skip_unavailable"` +} + +type ElasticsearchRemoteCluster struct { + DeploymentId *string `tfsdk:"deployment_id"` + Alias *string `tfsdk:"alias"` + RefId *string `tfsdk:"ref_id"` + SkipUnavailable *bool `tfsdk:"skip_unavailable"` +} + +type ElasticsearchRemoteClusters []ElasticsearchRemoteCluster diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_snapshot_source.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_snapshot_source.go new file mode 100644 index 000000000..cfb60f616 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_snapshot_source.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchSnapshotSourceTF struct { + SourceElasticsearchClusterId types.String `tfsdk:"source_elasticsearch_cluster_id"` + SnapshotName types.String `tfsdk:"snapshot_name"` +} + +type ElasticsearchSnapshotSource struct { + SourceElasticsearchClusterId string `tfsdk:"source_elasticsearch_cluster_id"` + SnapshotName string `tfsdk:"snapshot_name"` +} + +type ElasticsearchSnapshotSources []ElasticsearchSnapshotSource diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_strategy.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_strategy.go new file mode 100644 index 000000000..51cabca9f --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_strategy.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchStrategyTF struct { + Type types.String `tfsdk:"type"` +} + +type ElasticsearchStrategiesTF types.List + +type ElasticsearchStrategy struct { + Type string `tfsdk:"type"` +} + +type ElasticsearchStrategies []ElasticsearchStrategy diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology.go new file mode 100644 index 000000000..2b2b7c3cc --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package v1
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// ElasticsearchTopologyTF models a single "topology" block as decoded from
+// Terraform plan/state; framework value types carry null/unknown during
+// planning. Nested "autoscaling" and "config" stay as raw lists here and are
+// decoded separately.
+type ElasticsearchTopologyTF struct {
+	Id                      types.String `tfsdk:"id"`
+	InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"`
+	Size                    types.String `tfsdk:"size"`
+	SizeResource            types.String `tfsdk:"size_resource"`
+	ZoneCount               types.Int64  `tfsdk:"zone_count"`
+	NodeTypeData            types.String `tfsdk:"node_type_data"`
+	NodeTypeMaster          types.String `tfsdk:"node_type_master"`
+	NodeTypeIngest          types.String `tfsdk:"node_type_ingest"`
+	NodeTypeMl              types.String `tfsdk:"node_type_ml"`
+	NodeRoles               types.Set    `tfsdk:"node_roles"`
+	Autoscaling             types.List   `tfsdk:"autoscaling"`
+	Config                  types.List   `tfsdk:"config"`
+}
+
+// ElasticsearchTopology is the plain-Go counterpart of ElasticsearchTopologyTF.
+// Pointer fields distinguish "unset" (nil) from an explicit zero value.
+type ElasticsearchTopology struct {
+	Id                      string                            `tfsdk:"id"`
+	InstanceConfigurationId *string                           `tfsdk:"instance_configuration_id"`
+	Size                    *string                           `tfsdk:"size"`
+	SizeResource            *string                           `tfsdk:"size_resource"`
+	ZoneCount               int                               `tfsdk:"zone_count"`
+	NodeTypeData            *string                           `tfsdk:"node_type_data"`
+	NodeTypeMaster          *string                           `tfsdk:"node_type_master"`
+	NodeTypeIngest          *string                           `tfsdk:"node_type_ingest"`
+	NodeTypeMl              *string                           `tfsdk:"node_type_ml"`
+	NodeRoles               []string                          `tfsdk:"node_roles"`
+	Autoscaling             ElasticsearchTopologyAutoscalings `tfsdk:"autoscaling"`
+	Config                  ElasticsearchTopologyConfigs      `tfsdk:"config"`
+}
+
+// ElasticsearchTopologies is a collection of topology elements.
+type ElasticsearchTopologies []ElasticsearchTopology
diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology_autoscaling.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology_autoscaling.go
new file mode 100644
index 000000000..653c44e89
--- /dev/null
+++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology_autoscaling.go
@@ -0,0 +1,40 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V.
licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package v1
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// ElasticsearchTopologyAutoscalingTF models a topology's "autoscaling" block
+// as decoded from Terraform plan/state (framework value types; may be
+// null/unknown during planning).
+type ElasticsearchTopologyAutoscalingTF struct {
+	MaxSizeResource    types.String `tfsdk:"max_size_resource"`
+	MaxSize            types.String `tfsdk:"max_size"`
+	MinSizeResource    types.String `tfsdk:"min_size_resource"`
+	MinSize            types.String `tfsdk:"min_size"`
+	PolicyOverrideJson types.String `tfsdk:"policy_override_json"`
+}
+
+// ElasticsearchTopologyAutoscaling is the plain-Go counterpart of
+// ElasticsearchTopologyAutoscalingTF; nil pointers mean "unset".
+type ElasticsearchTopologyAutoscaling struct {
+	MaxSizeResource    *string `tfsdk:"max_size_resource"`
+	MaxSize            *string `tfsdk:"max_size"`
+	MinSizeResource    *string `tfsdk:"min_size_resource"`
+	MinSize            *string `tfsdk:"min_size"`
+	PolicyOverrideJson *string `tfsdk:"policy_override_json"`
+}
+
+// ElasticsearchTopologyAutoscalings is a collection of autoscaling settings.
+type ElasticsearchTopologyAutoscalings []ElasticsearchTopologyAutoscaling
diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology_config.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology_config.go
new file mode 100644
index 000000000..a7fe4c800
--- /dev/null
+++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology_config.go
@@ -0,0 +1,40 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V.
licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package v1
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// ElasticsearchTopologyConfigTF models a topology's "config" block as decoded
+// from Terraform plan/state (framework value types; may be null/unknown).
+type ElasticsearchTopologyConfigTF struct {
+	Plugins                  types.Set    `tfsdk:"plugins"`
+	UserSettingsJson         types.String `tfsdk:"user_settings_json"`
+	UserSettingsOverrideJson types.String `tfsdk:"user_settings_override_json"`
+	UserSettingsYaml         types.String `tfsdk:"user_settings_yaml"`
+	UserSettingsOverrideYaml types.String `tfsdk:"user_settings_override_yaml"`
+}
+
+// ElasticsearchTopologyConfig is the plain-Go counterpart of
+// ElasticsearchTopologyConfigTF; nil pointers mean "unset".
+type ElasticsearchTopologyConfig struct {
+	Plugins                  []string `tfsdk:"plugins"`
+	UserSettingsJson         *string  `tfsdk:"user_settings_json"`
+	UserSettingsOverrideJson *string  `tfsdk:"user_settings_override_json"`
+	UserSettingsYaml         *string  `tfsdk:"user_settings_yaml"`
+	UserSettingsOverrideYaml *string  `tfsdk:"user_settings_override_yaml"`
+}
+
+// ElasticsearchTopologyConfigs is a collection of topology config settings.
+type ElasticsearchTopologyConfigs []ElasticsearchTopologyConfig
diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_trust_account.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_trust_account.go
new file mode 100644
index 000000000..a386cee0b
--- /dev/null
+++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_trust_account.go
@@ -0,0 +1,38 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership.
Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package v1
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// ElasticsearchTrustAccountTF models a "trust_account" block as decoded from
+// Terraform plan/state (framework value types; may be null/unknown).
+type ElasticsearchTrustAccountTF struct {
+	AccountId      types.String `tfsdk:"account_id"`
+	TrustAll       types.Bool   `tfsdk:"trust_all"`
+	TrustAllowlist types.Set    `tfsdk:"trust_allowlist"`
+}
+
+// ElasticsearchTrustAccountsTF is a named alias over the raw framework set
+// that holds trust_account blocks before element-wise decoding.
+type ElasticsearchTrustAccountsTF types.Set
+
+// ElasticsearchTrustAccount is the plain-Go counterpart of
+// ElasticsearchTrustAccountTF; nil pointers mean "unset".
+type ElasticsearchTrustAccount struct {
+	AccountId      *string  `tfsdk:"account_id"`
+	TrustAll       *bool    `tfsdk:"trust_all"`
+	TrustAllowlist []string `tfsdk:"trust_allowlist"`
+}
+
+// ElasticsearchTrustAccounts is a collection of account trust settings.
+type ElasticsearchTrustAccounts []ElasticsearchTrustAccount
diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_trust_external.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_trust_external.go
new file mode 100644
index 000000000..4c20cd1be
--- /dev/null
+++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_trust_external.go
@@ -0,0 +1,36 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package v1
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// ElasticsearchTrustExternalTF models a "trust_external" block as decoded
+// from Terraform plan/state (framework value types; may be null/unknown).
+type ElasticsearchTrustExternalTF struct {
+	RelationshipId types.String `tfsdk:"relationship_id"`
+	TrustAll       types.Bool   `tfsdk:"trust_all"`
+	TrustAllowlist types.Set    `tfsdk:"trust_allowlist"`
+}
+
+// ElasticsearchTrustExternal is the plain-Go counterpart of
+// ElasticsearchTrustExternalTF; nil pointers mean "unset".
+type ElasticsearchTrustExternal struct {
+	RelationshipId *string  `tfsdk:"relationship_id"`
+	TrustAll       *bool    `tfsdk:"trust_all"`
+	TrustAllowlist []string `tfsdk:"trust_allowlist"`
+}
+
+// ElasticsearchTrustExternals is a collection of external trust settings.
+type ElasticsearchTrustExternals []ElasticsearchTrustExternal
diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go
new file mode 100644
index 000000000..c3c5ddbf5
--- /dev/null
+++ b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go
@@ -0,0 +1,544 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.
See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "context" + "strings" + + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/elastic/terraform-provider-ec/ec/internal/validators" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// These constants are only used to determine whether or not a dedicated +// tier of masters or ingest (coordinating) nodes are set. +const ( + dataTierRolePrefix = "data_" + ingestDataTierRole = "ingest" + masterDataTierRole = "master" + autodetect = "autodetect" + growAndShrink = "grow_and_shrink" + rollingGrowAndShrink = "rolling_grow_and_shrink" + rollingAll = "rolling_all" +) + +// List of update strategies availables. +var strategiesList = []string{ + autodetect, growAndShrink, rollingGrowAndShrink, rollingAll, +} + +func ElasticsearchSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Required Elasticsearch resource definition", + Required: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "autoscale": { + Type: types.StringType, + Description: `Enable or disable autoscaling. Defaults to the setting coming from the deployment template. 
Accepted values are "true" or "false".`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "ref_id": { + Type: types.StringType, + Description: "Optional ref_id to set on the Elasticsearch resource", + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + resource.UseStateForUnknown(), + }, + }, + "resource_id": { + Type: types.StringType, + Description: "The Elasticsearch resource unique identifier", + Computed: true, + // PlanModifiers: tfsdk.AttributePlanModifiers{ + // resource.UseStateForUnknown(), + // }, + }, + "region": { + Type: types.StringType, + Description: "The Elasticsearch resource region", + Computed: true, + // PlanModifiers: tfsdk.AttributePlanModifiers{ + // resource.UseStateForUnknown(), + // }, + }, + "cloud_id": { + Type: types.StringType, + Description: "The encoded Elasticsearch credentials to use in Beats or Logstash", + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + resource.RequiresReplaceIf(func(ctx context.Context, state, config attr.Value, path path.Path) (bool, diag.Diagnostics) { + return true, nil + }, "", ""), + }, + }, + "http_endpoint": { + Type: types.StringType, + Description: "The Elasticsearch resource HTTP endpoint", + Computed: true, + // PlanModifiers: tfsdk.AttributePlanModifiers{ + // resource.UseStateForUnknown(), + // }, + }, + "https_endpoint": { + Type: types.StringType, + Description: "The Elasticsearch resource HTTPs endpoint", + Computed: true, + // PlanModifiers: tfsdk.AttributePlanModifiers{ + // resource.UseStateForUnknown(), + // }, + }, + "topology": ElasticsearchTopologySchema(), + + "trust_account": ElasticsearchTrustAccountSchema(), + + "trust_external": ElasticsearchTrustExternalSchema(), + + "config": ElasticsearchConfigSchema(), + + "remote_cluster": 
ElasticsearchRemoteClusterSchema(), + + "snapshot_source": ElasticsearchSnapshotSourceSchema(), + + "extension": ElasticsearchExtensionSchema(), + + "strategy": ElasticsearchStrategySchema(), + }), + } +} + +func ElasticsearchConfigSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: `Optional Elasticsearch settings which will be applied to all topologies unless overridden on the topology element`, + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + // TODO + // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the Elasticsearch nodes will use. Note that this field will only work for internal users only.", + Optional: true, + }, + "plugins": { + Type: types.SetType{ + ElemType: types.StringType, + }, + Description: "List of Elasticsearch supported plugins, which vary from version to version. Check the Stack Pack version to see which plugins are supported for each version. 
This is currently only available from the UI and [ecctl](https://www.elastic.co/guide/en/ecctl/master/ecctl_stack_list.html)", + Optional: true, + }, + "user_settings_json": { + Type: types.StringType, + Description: `JSON-formatted user level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `JSON-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `YAML-formatted user level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `YAML-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + }), + } +} + +func ElasticsearchTopologySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Computed: true, + Optional: true, + Description: `Optional topology element which must be set once but can be set multiple times to compose complex topologies`, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "id": { + Type: types.StringType, + Description: `Required topology ID from the deployment template`, + Required: true, + }, + "instance_configuration_id": { + Type: types.StringType, + Description: `Computed Instance Configuration ID of the topology element`, + Computed: true, + }, + "size": { + Type: types.StringType, + Description: `Optional amount of memory per node in the "g" notation`, + Computed: true, + Optional: true, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + }, + }, + "zone_count": { + Type: types.Int64Type, + Description: `Optional number of 
zones that the Elasticsearch cluster will span. This is used to set HA`, + Computed: true, + Optional: true, + }, + "node_type_data": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (data node)`, + Computed: true, + Optional: true, + }, + "node_type_master": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (master node)`, + Computed: true, + Optional: true, + }, + "node_type_ingest": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (ingest node)`, + Computed: true, + Optional: true, + }, + "node_type_ml": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (machine learning node)`, + Computed: true, + Optional: true, + }, + "node_roles": { + Type: types.SetType{ + ElemType: types.StringType, + }, + Description: `The computed list of node roles for the current topology element`, + Computed: true, + }, + "autoscaling": ElasticsearchTopologyAutoscalingSchema(), + "config": ElasticsearchTopologyConfigSchema(), + }), + } +} + +func ElasticsearchTopologyAutoscalingSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources.", + Optional: true, + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "max_size_resource": { + Description: "Maximum resource type for the maximum autoscaling setting.", + Type: types.StringType, + Optional: true, + Computed: true, + }, + "max_size": { + Description: "Maximum size value for the maximum autoscaling setting.", + Type: types.StringType, + Optional: true, + Computed: true, + }, + "min_size_resource": { + Description: "Minimum resource type for the minimum autoscaling setting.", + Type: types.StringType, + Optional: true, + Computed: true, + }, + 
"min_size": { + Description: "Minimum size value for the minimum autoscaling setting.", + Type: types.StringType, + Optional: true, + Computed: true, + }, + "policy_override_json": { + Type: types.StringType, + Description: "Computed policy overrides set directly via the API or other clients.", + Computed: true, + }, + }), + } +} + +func ElasticsearchRemoteClusterSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch remote clusters to configure for the Elasticsearch resource, can be set multiple times", + Optional: true, + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "deployment_id": { + Description: "Remote deployment ID", + Type: types.StringType, + // TODO fix examples/deployment_css/deployment.tf#61 + // Validators: []tfsdk.AttributeValidator{validators.Length(32, 32)}, + Required: true, + }, + "alias": { + Description: "Alias for this Cross Cluster Search binding", + Type: types.StringType, + // TODO fix examples/deployment_css/deployment.tf#62 + // Validators: []tfsdk.AttributeValidator{validators.NotEmpty()}, + Required: true, + }, + "ref_id": { + Description: `Remote elasticsearch "ref_id", it is best left to the default value`, + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + resource.UseStateForUnknown(), + }, + Optional: true, + }, + "skip_unavailable": { + Description: "If true, skip the cluster during search when disconnected", + Type: types.BoolType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: false}), + }, + Optional: true, + }, + }), + } +} + +func ElasticsearchSnapshotSourceSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional snapshot source settings. 
Restore data from a snapshot of another deployment.", + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "source_elasticsearch_cluster_id": { + Description: "ID of the Elasticsearch cluster that will be used as the source of the snapshot", + Type: types.StringType, + Required: true, + }, + "snapshot_name": { + Description: "Name of the snapshot to restore. Use '__latest_success__' to get the most recent successful snapshot.", + Type: types.StringType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "__latest_success__"}), + resource.UseStateForUnknown(), + }, + Optional: true, + Computed: true, + }, + }), + } +} + +func ElasticsearchExtensionSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch extensions such as custom bundles or plugins.", + Optional: true, + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "name": { + Description: "Extension name.", + Type: types.StringType, + Required: true, + }, + "type": { + Description: "Extension type, only `bundle` or `plugin` are supported.", + Type: types.StringType, + Required: true, + Validators: []tfsdk.AttributeValidator{validators.OneOf([]string{`"bundle"`, `"plugin"`})}, + }, + "version": { + Description: "Elasticsearch compatibility version. 
Bundles should specify major or minor versions with wildcards, such as `7.*` or `*` but **plugins must use full version notation down to the patch level**, such as `7.10.1` and wildcards are not allowed.", + Type: types.StringType, + Required: true, + }, + "url": { + Description: "Bundle or plugin URL, the extension URL can be obtained from the `ec_deployment_extension..url` attribute or the API and cannot be a random HTTP address that is hosted elsewhere.", + Type: types.StringType, + Required: true, + }, + }), + } +} + +func ElasticsearchTrustAccountSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch account trust settings.", + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "account_id": { + Description: "The ID of the Account.", + Type: types.StringType, + Required: true, + }, + "trust_all": { + Description: "If true, all clusters in this account will by default be trusted and the `trust_allowlist` is ignored.", + Type: types.BoolType, + Required: true, + }, + "trust_allowlist": { + Description: "The list of clusters to trust. Only used when `trust_all` is false.", + Type: types.SetType{ + ElemType: types.StringType, + }, + Optional: true, + }, + }), + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + } +} + +func ElasticsearchTrustExternalSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch external trust settings.", + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "relationship_id": { + Description: "The ID of the external trust relationship.", + Type: types.StringType, + Required: true, + }, + "trust_all": { + Description: "If true, all clusters in this account will by default be trusted and the `trust_allowlist` is ignored.", + Type: types.BoolType, + Required: true, + }, + "trust_allowlist": { + Description: "The list of clusters to trust. 
Only used when `trust_all` is false.", + Type: types.SetType{ + ElemType: types.StringType, + }, + Optional: true, + }, + }), + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + } +} + +func ElasticsearchStrategySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Configuration strategy settings.", + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "type": { + Description: "Configuration strategy type " + strings.Join(strategiesList, ", "), + Type: types.StringType, + Required: true, + Validators: []tfsdk.AttributeValidator{validators.OneOf(strategiesList)}, + // TODO + // changes on this setting do not change the plan. + // DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // return true + // }, + }, + }), + } +} + +func ElasticsearchTopologyConfigSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: `Computed read-only configuration to avoid unsetting plan settings from 'topology.elasticsearch'`, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + planmodifier.DefaultValue(types.List{ + Null: true, + ElemType: types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "plugins": types.SetType{ + ElemType: types.StringType, + }, + "user_settings_json": types.StringType, + "user_settings_override_json": types.StringType, + "user_settings_yaml": types.StringType, + "user_settings_override_yaml": types.StringType, + }, + }, + }), + }, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "plugins": { + Type: types.SetType{ + ElemType: types.StringType, + }, + Description: "List of Elasticsearch supported plugins, which vary from version to version. 
Check the Stack Pack version to see which plugins are supported for each version. This is currently only available from the UI and [ecctl](https://www.elastic.co/guide/en/ecctl/master/ecctl_stack_list.html)", + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "user_settings_json": { + Type: types.StringType, + Description: `JSON-formatted user level "elasticsearch.yml" setting overrides`, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `JSON-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `YAML-formatted user level "elasticsearch.yml" setting overrides`, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `YAML-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch.go new file mode 100644 index 000000000..977a1a225 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch.go @@ -0,0 +1,413 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package v2
+
+import (
+	"context"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/cloud-sdk-go/pkg/models"
+	"github.com/elastic/cloud-sdk-go/pkg/util/ec"
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/tfsdk"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+
+	"github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils"
+	"github.com/elastic/terraform-provider-ec/ec/internal/converters"
+	"github.com/elastic/terraform-provider-ec/ec/internal/util"
+)
+
+// ElasticsearchTF is the v2 (framework) plan/state representation of the
+// elasticsearch resource block. Each tier ("hot" ... "ml") is a separate
+// nested object attribute rather than a v1-style topology list; fields may
+// be null/unknown during planning.
+type ElasticsearchTF struct {
+	Autoscale        types.String `tfsdk:"autoscale"`
+	RefId            types.String `tfsdk:"ref_id"`
+	ResourceId       types.String `tfsdk:"resource_id"`
+	Region           types.String `tfsdk:"region"`
+	CloudID          types.String `tfsdk:"cloud_id"`
+	HttpEndpoint     types.String `tfsdk:"http_endpoint"`
+	HttpsEndpoint    types.String `tfsdk:"https_endpoint"`
+	HotContentTier   types.Object `tfsdk:"hot"`
+	CoordinatingTier types.Object `tfsdk:"coordinating"`
+	MasterTier       types.Object `tfsdk:"master"`
+	WarmTier         types.Object `tfsdk:"warm"`
+	ColdTier         types.Object `tfsdk:"cold"`
+	FrozenTier       types.Object `tfsdk:"frozen"`
+	MlTier           types.Object `tfsdk:"ml"`
+	Config           types.Object `tfsdk:"config"`
+	RemoteCluster    types.Set    `tfsdk:"remote_cluster"`
+	SnapshotSource   types.Object `tfsdk:"snapshot_source"`
+	Extension        types.Set    `tfsdk:"extension"`
+	TrustAccount types.Set
`tfsdk:"trust_account"`
+	TrustExternal types.Set    `tfsdk:"trust_external"`
+	Strategy      types.String `tfsdk:"strategy"`
+}
+
+// Elasticsearch is the plain-Go counterpart of ElasticsearchTF, used once all
+// values are known; nil pointers mean "unset".
+type Elasticsearch struct {
+	Autoscale        *string                      `tfsdk:"autoscale"`
+	RefId            *string                      `tfsdk:"ref_id"`
+	ResourceId       *string                      `tfsdk:"resource_id"`
+	Region           *string                      `tfsdk:"region"`
+	CloudID          *string                      `tfsdk:"cloud_id"`
+	HttpEndpoint     *string                      `tfsdk:"http_endpoint"`
+	HttpsEndpoint    *string                      `tfsdk:"https_endpoint"`
+	HotTier          *ElasticsearchTopology       `tfsdk:"hot"`
+	CoordinatingTier *ElasticsearchTopology       `tfsdk:"coordinating"`
+	MasterTier       *ElasticsearchTopology       `tfsdk:"master"`
+	WarmTier         *ElasticsearchTopology       `tfsdk:"warm"`
+	ColdTier         *ElasticsearchTopology       `tfsdk:"cold"`
+	FrozenTier       *ElasticsearchTopology       `tfsdk:"frozen"`
+	MlTier           *ElasticsearchTopology       `tfsdk:"ml"`
+	Config           *ElasticsearchConfig         `tfsdk:"config"`
+	RemoteCluster    ElasticsearchRemoteClusters  `tfsdk:"remote_cluster"`
+	SnapshotSource   *ElasticsearchSnapshotSource `tfsdk:"snapshot_source"`
+	Extension        ElasticsearchExtensions      `tfsdk:"extension"`
+	TrustAccount     ElasticsearchTrustAccounts   `tfsdk:"trust_account"`
+	TrustExternal    ElasticsearchTrustExternals  `tfsdk:"trust_external"`
+	Strategy         *string                      `tfsdk:"strategy"`
+}
+
+// ElasticsearchPayload decodes the plan/state elasticsearch object and builds
+// the API payload on top of the deployment template. It returns (nil, nil)
++// when the block is null/unknown or decodes to nil (no resource configured).
+func ElasticsearchPayload(ctx context.Context, esObj types.Object, template *models.DeploymentTemplateInfoV2, dtID, version string, useNodeRoles bool, skipTopologies bool) (*models.ElasticsearchPayload, diag.Diagnostics) {
+	var es *ElasticsearchTF
+
+	if esObj.IsNull() || esObj.IsUnknown() {
+		return nil, nil
+	}
+
+	if diags := tfsdk.ValueAs(ctx, esObj, &es); diags.HasError() {
+		return nil, diags
+	}
+
+	// NOTE(review): the original code had two consecutive `if es == nil`
+	// branches; the second one (reporting "cannot find elasticsearch data")
+	// was unreachable dead code. The first, behavior-defining branch is kept.
+	if es == nil {
+		return nil, nil
+	}
+
+	templatePayload := utils.EnrichElasticsearchTemplate(utils.EsResource(template), dtID, version, useNodeRoles)
+
+	payload, diags := es.Payload(ctx, templatePayload, skipTopologies)
+	if diags.HasError() {
+		return nil, diags
+	}
+
+	return payload, nil
+}
+
+// ReadElasticsearches returns the first Elasticsearch resource whose current
+// plan is non-empty and which is not stopped, or (nil, nil) if none matches.
+func ReadElasticsearches(in []*models.ElasticsearchResourceInfo, remotes *models.RemoteResources) (*Elasticsearch, error) {
+	for _, model := range in {
+		if util.IsCurrentEsPlanEmpty(model) || utils.IsEsResourceStopped(model) {
+			continue
+		}
+		es, err := ReadElasticsearch(model, remotes)
+		if err != nil {
+			return nil, err
+		}
+		return es, nil
+	}
+
+	return nil, nil
+}
+
+// ReadElasticsearch converts an API-side resource model (and its remote
+// cluster resources) into the provider's Elasticsearch representation.
+// A resource with an empty current plan, or one that is stopped, yields the
+// zero value.
+func ReadElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.RemoteResources) (*Elasticsearch, error) {
+	var es Elasticsearch
+
+	if util.IsCurrentEsPlanEmpty(in) || utils.IsEsResourceStopped(in) {
+		return &es, nil
+	}
+
+	if in.Info.ClusterID != nil && *in.Info.ClusterID != "" {
+		es.ResourceId = in.Info.ClusterID
+	}
+
+	if in.RefID != nil && *in.RefID != "" {
+		es.RefId = in.RefID
+	}
+
+	if in.Region != nil {
+		es.Region = in.Region
+	}
+
+	plan := in.Info.PlanInfo.Current.Plan
+	var err error
+
+	topologies, err := ReadElasticsearchTopologies(plan)
+	if err != nil {
+		return nil, err
+	}
+	es.setTopology(topologies)
+
+	if plan.AutoscalingEnabled != nil {
+		// Autoscale is modelled as the strings "true"/"false" in state.
+		es.Autoscale = ec.String(strconv.FormatBool(*plan.AutoscalingEnabled))
+	}
+
+	if meta := in.Info.Metadata; meta != nil && meta.CloudID != "" {
+		es.CloudID = &meta.CloudID
+	}
+
+	es.HttpEndpoint, es.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata)
+
+	es.Config, err = ReadElasticsearchConfig(plan.Elasticsearch)
+	if err != nil {
+		return nil, err
+	}
+
+	clusters, err := ReadElasticsearchRemoteClusters(remotes.Resources)
+	if err != nil {
+		return nil, err
+	}
+	es.RemoteCluster = clusters
+
+	extensions, err := ReadElasticsearchExtensions(plan.Elasticsearch)
+	if err != nil {
+		return nil, err
+	}
+	es.Extension = extensions
+
+	accounts, err := ReadElasticsearchTrustAccounts(in.Info.Settings)
+	if err != nil {
+		return nil, err
+	}
+	es.TrustAccount = accounts
+
+	externals, err =
ReadElasticsearchTrustExternals(in.Info.Settings) + if err != nil { + return nil, err + } + es.TrustExternal = externals + + return &es, nil +} + +func (es *ElasticsearchTF) Payload(ctx context.Context, res *models.ElasticsearchPayload, skipTopologies bool) (*models.ElasticsearchPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + if !es.RefId.IsNull() { + res.RefID = &es.RefId.Value + } + + if es.Region.Value != "" { + res.Region = &es.Region.Value + } + + // Unsetting the curation properties is since they're deprecated since + // >= 6.6.0 which is when ILM is introduced in Elasticsearch. + unsetElasticsearchCuration(res) + + var ds diag.Diagnostics + + if !skipTopologies { + diags.Append(es.topologiesPayload(ctx, res.Plan.ClusterTopology)...) + } + + // Fixes the node_roles field to remove the dedicated tier roles from the + // list when these are set as a dedicated tier as a topology element. + UpdateNodeRolesOnDedicatedTiers(res.Plan.ClusterTopology) + + res.Plan.Elasticsearch, ds = ElasticsearchConfigPayload(ctx, es.Config, res.Plan.Elasticsearch) + diags.Append(ds...) + + diags.Append(elasticsearchSnapshotSourcePayload(ctx, es.SnapshotSource, res.Plan)...) + + diags.Append(elasticsearchExtensionPayload(ctx, es.Extension, res.Plan.Elasticsearch)...) + + if es.Autoscale.Value != "" { + autoscaleBool, err := strconv.ParseBool(es.Autoscale.Value) + if err != nil { + diags.AddError("failed parsing autoscale value", err.Error()) + } else { + res.Plan.AutoscalingEnabled = &autoscaleBool + } + } + + res.Settings, ds = ElasticsearchTrustAccountPayload(ctx, es.TrustAccount, res.Settings) + diags.Append(ds...) + + res.Settings, ds = ElasticsearchTrustExternalPayload(ctx, es.TrustExternal, res.Settings) + diags.Append(ds...) 
+ + elasticsearchStrategyPayload(es.Strategy, res.Plan) + + return res, diags +} + +func (es *ElasticsearchTF) topologiesPayload(ctx context.Context, topologies []*models.ElasticsearchClusterTopologyElement) diag.Diagnostics { + var diags diag.Diagnostics + + diags.Append(topologyPayload(ctx, es.HotContentTier, "hot_content", topologies)...) + diags.Append(topologyPayload(ctx, es.CoordinatingTier, "coordinating", topologies)...) + diags.Append(topologyPayload(ctx, es.MasterTier, "master", topologies)...) + diags.Append(topologyPayload(ctx, es.WarmTier, "warm", topologies)...) + diags.Append(topologyPayload(ctx, es.ColdTier, "cold", topologies)...) + diags.Append(topologyPayload(ctx, es.FrozenTier, "frozen", topologies)...) + diags.Append(topologyPayload(ctx, es.MlTier, "ml", topologies)...) + + return diags +} + +func topologyPayload(ctx context.Context, topologyObj types.Object, id string, topologies []*models.ElasticsearchClusterTopologyElement) diag.Diagnostics { + var diags diag.Diagnostics + + if !topologyObj.IsNull() && !topologyObj.IsUnknown() { + var topology ElasticsearchTopologyTF + + ds := tfsdk.ValueAs(ctx, topologyObj, &topology) + diags.Append(ds...) + + if !ds.HasError() { + diags.Append(topology.Payload(ctx, id, topologies)...) 
+ } + } + + return diags +} + +func (es *Elasticsearch) setTopology(topologies ElasticsearchTopologies) { + set := topologies.Set() + + for id, topology := range set { + topology := topology + switch id { + case "hot_content": + es.HotTier = &topology + case "coordinating": + es.CoordinatingTier = &topology + case "master": + es.MasterTier = &topology + case "warm": + es.WarmTier = &topology + case "cold": + es.ColdTier = &topology + case "frozen": + es.FrozenTier = &topology + case "ml": + es.MlTier = &topology + } + } +} + +func unsetElasticsearchCuration(payload *models.ElasticsearchPayload) { + if payload.Plan.Elasticsearch != nil { + payload.Plan.Elasticsearch.Curation = nil + } + + if payload.Settings != nil { + payload.Settings.Curation = nil + } +} + +func UpdateNodeRolesOnDedicatedTiers(topologies []*models.ElasticsearchClusterTopologyElement) { + dataTier, hasMasterTier, hasIngestTier := dedicatedTopoogies(topologies) + // This case is not very likely since all deployments will have a data tier. + // It's here because the code path is technically possible and it's better + // than a straight panic. 
+ if dataTier == nil { + return + } + + if hasIngestTier { + dataTier.NodeRoles = removeItemFromSlice( + dataTier.NodeRoles, ingestDataTierRole, + ) + } + if hasMasterTier { + dataTier.NodeRoles = removeItemFromSlice( + dataTier.NodeRoles, masterDataTierRole, + ) + } +} + +func removeItemFromSlice(slice []string, item string) []string { + var hasItem bool + var itemIndex int + for i, str := range slice { + if str == item { + hasItem = true + itemIndex = i + } + } + if hasItem { + copy(slice[itemIndex:], slice[itemIndex+1:]) + return slice[:len(slice)-1] + } + return slice +} + +func dedicatedTopoogies(topologies []*models.ElasticsearchClusterTopologyElement) (dataTier *models.ElasticsearchClusterTopologyElement, hasMasterTier, hasIngestTier bool) { + for _, topology := range topologies { + var hasSomeDataRole bool + var hasMasterRole bool + var hasIngestRole bool + for _, role := range topology.NodeRoles { + sizeNonZero := *topology.Size.Value > 0 + if strings.HasPrefix(role, dataTierRolePrefix) && sizeNonZero { + hasSomeDataRole = true + } + if role == ingestDataTierRole && sizeNonZero { + hasIngestRole = true + } + if role == masterDataTierRole && sizeNonZero { + hasMasterRole = true + } + } + + if !hasSomeDataRole && hasMasterRole { + hasMasterTier = true + } + + if !hasSomeDataRole && hasIngestRole { + hasIngestTier = true + } + + if hasSomeDataRole && hasMasterRole { + dataTier = topology + } + } + + return dataTier, hasMasterTier, hasIngestTier +} + +func elasticsearchStrategyPayload(strategy types.String, payload *models.ElasticsearchClusterPlan) { + createModelIfNeeded := func() { + if payload.Transient == nil { + payload.Transient = &models.TransientElasticsearchPlanConfiguration{ + Strategy: &models.PlanStrategy{}, + } + } + } + + switch strategy.Value { + case autodetect: + createModelIfNeeded() + payload.Transient.Strategy.Autodetect = new(models.AutodetectStrategyConfig) + case growAndShrink: + createModelIfNeeded() + 
payload.Transient.Strategy.GrowAndShrink = new(models.GrowShrinkStrategyConfig) + case rollingGrowAndShrink: + createModelIfNeeded() + payload.Transient.Strategy.RollingGrowAndShrink = new(models.RollingGrowShrinkStrategyConfig) + case rollingAll: + createModelIfNeeded() + payload.Transient.Strategy.Rolling = &models.RollingStrategyConfig{ + GroupBy: "__all__", + } + } +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_config.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_config.go new file mode 100644 index 000000000..4f444b474 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_config.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "bytes" + "context" + "encoding/json" + "reflect" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +type ElasticsearchConfig v1.ElasticsearchConfig + +func (c *ElasticsearchConfig) IsEmpty() bool { + return c == nil || reflect.ValueOf(*c).IsZero() +} + +func ReadElasticsearchConfig(in *models.ElasticsearchConfiguration) (*ElasticsearchConfig, error) { + var config ElasticsearchConfig + + if in == nil { + return &ElasticsearchConfig{}, nil + } + + if len(in.EnabledBuiltInPlugins) > 0 { + config.Plugins = append(config.Plugins, in.EnabledBuiltInPlugins...) + } + + if in.UserSettingsYaml != "" { + config.UserSettingsYaml = &in.UserSettingsYaml + } + + if in.UserSettingsOverrideYaml != "" { + config.UserSettingsOverrideYaml = &in.UserSettingsOverrideYaml + } + + if o := in.UserSettingsJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + config.UserSettingsJson = ec.String(string(b)) + } + } + + if o := in.UserSettingsOverrideJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + config.UserSettingsOverrideJson = ec.String(string(b)) + } + } + + if in.DockerImage != "" { + config.DockerImage = ec.String(in.DockerImage) + } + + return &config, nil +} + +func ElasticsearchConfigPayload(ctx context.Context, cfgObj attr.Value, model *models.ElasticsearchConfiguration) (*models.ElasticsearchConfiguration, diag.Diagnostics) { + if cfgObj.IsNull() || cfgObj.IsUnknown() { + return model, nil + } + + var cfg v1.ElasticsearchConfigTF + + diags := tfsdk.ValueAs(ctx, cfgObj, &cfg) + + if diags.HasError() { + return nil, diags + } + + if 
cfg.UserSettingsJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsJson.Value), &model.UserSettingsJSON); err != nil { + diags.AddError("failed expanding elasticsearch user_settings_json", err.Error()) + } + } + + if cfg.UserSettingsOverrideJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsOverrideJson.Value), &model.UserSettingsOverrideJSON); err != nil { + diags.AddError("failed expanding elasticsearch user_settings_override_json", err.Error()) + } + } + + if !cfg.UserSettingsYaml.IsNull() { + model.UserSettingsYaml = cfg.UserSettingsYaml.Value + } + + if !cfg.UserSettingsOverrideYaml.IsNull() { + model.UserSettingsOverrideYaml = cfg.UserSettingsOverrideYaml.Value + } + + ds := cfg.Plugins.ElementsAs(ctx, &model.EnabledBuiltInPlugins, true) + + diags = append(diags, ds...) + + if !cfg.DockerImage.IsNull() { + model.DockerImage = cfg.DockerImage.Value + } + + return model, diags +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_extension.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_extension.go new file mode 100644 index 000000000..8371d6e04 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_extension.go @@ -0,0 +1,136 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/models" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchExtensions v1.ElasticsearchExtensions + +func ReadElasticsearchExtensions(in *models.ElasticsearchConfiguration) (ElasticsearchExtensions, error) { + if len(in.UserBundles) == 0 && len(in.UserPlugins) == 0 { + return nil, nil + } + + extensions := make(ElasticsearchExtensions, 0, len(in.UserBundles)+len(in.UserPlugins)) + + for _, model := range in.UserBundles { + extension, err := ReadFromUserBundle(model) + if err != nil { + return nil, err + } + + extensions = append(extensions, *extension) + } + + for _, model := range in.UserPlugins { + extension, err := ReadFromUserPlugin(model) + if err != nil { + return nil, err + } + + extensions = append(extensions, *extension) + } + + return extensions, nil +} + +func elasticsearchExtensionPayload(ctx context.Context, extensions types.Set, es *models.ElasticsearchConfiguration) diag.Diagnostics { + for _, elem := range extensions.Elems { + var extension v1.ElasticsearchExtensionTF + + if diags := tfsdk.ValueAs(ctx, elem, &extension); diags.HasError() { + return diags + } + + version := extension.Version.Value + url := extension.Url.Value + name := extension.Name.Value + + if extension.Type.Value == "bundle" { + es.UserBundles = append(es.UserBundles, &models.ElasticsearchUserBundle{ + Name: &name, + ElasticsearchVersion: &version, + URL: &url, + }) + } + + if extension.Type.Value == "plugin" { + es.UserPlugins = append(es.UserPlugins, 
&models.ElasticsearchUserPlugin{ + Name: &name, + ElasticsearchVersion: &version, + URL: &url, + }) + } + } + return nil +} + +func ReadFromUserBundle(in *models.ElasticsearchUserBundle) (*v1.ElasticsearchExtension, error) { + var ext v1.ElasticsearchExtension + + ext.Type = "bundle" + + if in.ElasticsearchVersion == nil { + return nil, utils.MissingField("ElasticsearchUserBundle.ElasticsearchVersion") + } + ext.Version = *in.ElasticsearchVersion + + if in.URL == nil { + return nil, utils.MissingField("ElasticsearchUserBundle.URL") + } + ext.Url = *in.URL + + if in.Name == nil { + return nil, utils.MissingField("ElasticsearchUserBundle.Name") + } + ext.Name = *in.Name + + return &ext, nil +} + +func ReadFromUserPlugin(in *models.ElasticsearchUserPlugin) (*v1.ElasticsearchExtension, error) { + var ext v1.ElasticsearchExtension + + ext.Type = "plugin" + + if in.ElasticsearchVersion == nil { + return nil, utils.MissingField("ElasticsearchUserPlugin.ElasticsearchVersion") + } + ext.Version = *in.ElasticsearchVersion + + if in.URL == nil { + return nil, utils.MissingField("ElasticsearchUserPlugin.URL") + } + ext.Url = *in.URL + + if in.Name == nil { + return nil, utils.MissingField("ElasticsearchUserPlugin.Name") + } + ext.Name = *in.Name + + return &ext, nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch_expanders_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go similarity index 73% rename from ec/ecresource/deploymentresource/elasticsearch_expanders_test.go rename to ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go index e2f966947..88971b57a 100644 --- a/ec/ecresource/deploymentresource/elasticsearch_expanders_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go @@ -15,25 +15,30 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package v2 import ( - "errors" + "context" "testing" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" ) -func Test_expandEsResource(t *testing.T) { - tplPath := "testdata/template-aws-io-optimized-v2.json" +func Test_writeElasticsearch(t *testing.T) { + tplPath := "../../testdata/template-aws-io-optimized-v2.json" tp770 := func() *models.ElasticsearchPayload { - return enrichElasticsearchTemplate( - esResource(parseDeploymentTemplate(t, tplPath)), + return utils.EnrichElasticsearchTemplate( + utils.EsResource(testutil.ParseDeploymentTemplate(t, tplPath)), "aws-io-optimized-v2", "7.7.0", false, @@ -41,8 +46,8 @@ func Test_expandEsResource(t *testing.T) { } create710 := func() *models.ElasticsearchPayload { - return enrichElasticsearchTemplate( - esResource(parseDeploymentTemplate(t, tplPath)), + return utils.EnrichElasticsearchTemplate( + utils.EsResource(testutil.ParseDeploymentTemplate(t, tplPath)), "aws-io-optimized-v2", "7.10.0", true, @@ -50,18 +55,18 @@ func Test_expandEsResource(t *testing.T) { } update711 := func() *models.ElasticsearchPayload { - return enrichElasticsearchTemplate( - esResource(parseDeploymentTemplate(t, tplPath)), + return utils.EnrichElasticsearchTemplate( + utils.EsResource(testutil.ParseDeploymentTemplate(t, tplPath)), "aws-io-optimized-v2", "7.11.0", true, ) } - hotWarmTplPath := "testdata/template-aws-hot-warm-v2.json" + hotWarmTplPath := 
"../../testdata/template-aws-hot-warm-v2.json" hotWarmTpl770 := func() *models.ElasticsearchPayload { - return enrichElasticsearchTemplate( - esResource(parseDeploymentTemplate(t, hotWarmTplPath)), + return utils.EnrichElasticsearchTemplate( + utils.EsResource(testutil.ParseDeploymentTemplate(t, hotWarmTplPath)), "aws-io-optimized-v2", "7.7.0", false, @@ -69,18 +74,18 @@ func Test_expandEsResource(t *testing.T) { } hotWarm7111Tpl := func() *models.ElasticsearchPayload { - return enrichElasticsearchTemplate( - esResource(parseDeploymentTemplate(t, hotWarmTplPath)), + return utils.EnrichElasticsearchTemplate( + utils.EsResource(testutil.ParseDeploymentTemplate(t, hotWarmTplPath)), "aws-io-optimized-v2", "7.11.1", true, ) } - eceDefaultTplPath := "testdata/template-ece-3.0.0-default.json" + eceDefaultTplPath := "../../testdata/template-ece-3.0.0-default.json" eceDefaultTpl := func() *models.ElasticsearchPayload { - return enrichElasticsearchTemplate( - esResource(parseDeploymentTemplate(t, eceDefaultTplPath)), + return utils.EnrichElasticsearchTemplate( + utils.EsResource(testutil.ParseDeploymentTemplate(t, eceDefaultTplPath)), "aws-io-optimized-v2", "7.17.3", true, @@ -88,37 +93,37 @@ func Test_expandEsResource(t *testing.T) { } type args struct { - ess []interface{} - dt *models.ElasticsearchPayload + es Elasticsearch + template *models.DeploymentTemplateInfoV2 + templateID string + version string + useNodeRoles bool } tests := []struct { - name string - args args - want []*models.ElasticsearchPayload - err error + name string + args args + want *models.ElasticsearchPayload + diags diag.Diagnostics }{ - { - name: "returns nil when there's no resources", - }, { name: "parses an ES resource", args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": 
"2g", - "zone_count": 1, - }}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HotTier: &ElasticsearchTopology{ + id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, }, }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -169,21 +174,22 @@ func Test_expandEsResource(t *testing.T) { { name: "parses an ES resource with empty version (7.10.0) in state uses node_roles from the DT", args: args{ - dt: create710(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HotTier: &ElasticsearchTopology{ + id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, }, }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.10.0", + useNodeRoles: true, }, - want: enrichWithEmptyTopologies(create710(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(create710(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -237,24 +243,23 @@ func Test_expandEsResource(t *testing.T) { { name: "parses an 
ES resource with version 7.11.0 has node_roles coming from the saved state", args: args{ - dt: update711(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - "node_roles": schema.NewSet(schema.HashString, []interface{}{ - "a", "b", "c", - }), - }}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HotTier: &ElasticsearchTopology{ + id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + NodeRoles: []string{"a", "b", "c"}, }, }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.11.0", + useNodeRoles: true, }, - want: enrichWithEmptyTopologies(update711(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(update711(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -300,40 +305,20 @@ func Test_expandEsResource(t *testing.T) { }, }), }, - { - name: "parses an ES resource with invalid id", - args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "invalid", - "size": "2g", - "zone_count": 1, - }}, - }, - }, - }, - err: errors.New(`elasticsearch topology invalid: invalid id: valid topology IDs are "coordinating", "hot_content", "warm", "cold", "master", "ml"`), - }, { name: "parses an ES resource without a topology", args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": 
mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - }, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -384,29 +369,27 @@ func Test_expandEsResource(t *testing.T) { { name: "parses an ES resource (HotWarm)", args: args{ - dt: hotWarmTpl770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "deployment_template_id": "aws-hot-warm-v2", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }, - map[string]interface{}{ - "id": "warm", - "size": "2g", - "zone_count": 1, - }, - }, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HotTier: &ElasticsearchTopology{ + id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, + WarmTier: &ElasticsearchTopology{ + id: "warm", + Size: ec.String("2g"), + ZoneCount: 1, }, }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-hot-warm-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), 
Settings: &models.ElasticsearchClusterSettings{ @@ -488,33 +471,30 @@ func Test_expandEsResource(t *testing.T) { { name: "parses an ES resource with config (HotWarm)", args: args{ - dt: hotWarmTpl770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "deployment_template_id": "aws-hot-warm-v2", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "somesetting: true", - }}, - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }, - map[string]interface{}{ - "id": "warm", - "size": "2g", - "zone_count": 1, - }, - }, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Config: &ElasticsearchConfig{ + UserSettingsYaml: ec.String("somesetting: true"), + }, + HotTier: &ElasticsearchTopology{ + id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, + WarmTier: &ElasticsearchTopology{ + id: "warm", + Size: ec.String("2g"), + ZoneCount: 1, }, }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-hot-warm-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -597,14 +577,17 @@ func Test_expandEsResource(t *testing.T) { { name: "parses an ES resource without a topology (HotWarm)", args: args{ - dt: hotWarmTpl770(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - }}, + es: Elasticsearch{ + RefId: 
ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-hot-warm-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -686,27 +669,28 @@ func Test_expandEsResource(t *testing.T) { { name: "parses an ES resource with node type overrides (HotWarm)", args: args{ - dt: hotWarmTpl770(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "node_type_data": "false", - "node_type_master": "false", - "node_type_ingest": "false", - "node_type_ml": "true", - }, - map[string]interface{}{ - "id": "warm", - "node_type_master": "true", - }, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HotTier: &ElasticsearchTopology{ + id: "hot_content", + NodeTypeData: ec.String("false"), + NodeTypeMaster: ec.String("false"), + NodeTypeIngest: ec.String("false"), + NodeTypeMl: ec.String("true"), + }, + WarmTier: &ElasticsearchTopology{ + id: "warm", + NodeTypeMaster: ec.String("true"), }, - }}, + }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-hot-warm-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ Region: 
ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -789,31 +773,32 @@ func Test_expandEsResource(t *testing.T) { { name: "migrates old node_type state to new node_roles payload when the cold tier is set", args: args{ - dt: hotWarm7111Tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "node_type_data": "false", - "node_type_master": "false", - "node_type_ingest": "false", - "node_type_ml": "true", - }, - map[string]interface{}{ - "id": "warm", - "node_type_master": "true", - }, - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HotTier: &ElasticsearchTopology{ + id: "hot_content", + NodeTypeData: ec.String("false"), + NodeTypeMaster: ec.String("false"), + NodeTypeIngest: ec.String("false"), + NodeTypeMl: ec.String("true"), + }, + WarmTier: &ElasticsearchTopology{ + id: "warm", + NodeTypeMaster: ec.String("true"), + }, + ColdTier: &ElasticsearchTopology{ + id: "cold", + Size: ec.String("2g"), }, - }}, + }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.11.1", + useNodeRoles: true, }, - want: enrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -925,27 +910,28 @@ func Test_expandEsResource(t *testing.T) { { name: "autoscaling enabled", args: args{ - dt: hotWarm7111Tpl(), - ess: []interface{}{map[string]interface{}{ - "autoscale": "true", - 
"ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - }, - map[string]interface{}{ - "id": "warm", - }, - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, + es: Elasticsearch{ + Autoscale: ec.String("true"), + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HotTier: &ElasticsearchTopology{ + id: "hot_content", + }, + WarmTier: &ElasticsearchTopology{ + id: "warm", }, - }}, + ColdTier: &ElasticsearchTopology{ + id: "cold", + Size: ec.String("2g"), + }, + }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.11.1", + useNodeRoles: true, }, - want: enrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1057,52 +1043,45 @@ func Test_expandEsResource(t *testing.T) { { name: "autoscaling enabled overriding the size with ml", args: args{ - dt: hotWarm7111Tpl(), - ess: []interface{}{map[string]interface{}{ - "autoscale": "true", - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size": "58g", - }, - }, + es: Elasticsearch{ + Autoscale: ec.String("true"), + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HotTier: &ElasticsearchTopology{ + id: "hot_content", + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("58g"), }, - map[string]interface{}{ - "id": "warm", 
- "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size": "29g", - }, - }, + }, + WarmTier: &ElasticsearchTopology{ + id: "warm", + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("29g"), }, - map[string]interface{}{ - "id": "cold", - "size": "2g", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size": "29g", - }, - }, + }, + ColdTier: &ElasticsearchTopology{ + id: "cold", + Size: ec.String("2g"), + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("29g"), }, - map[string]interface{}{ - "id": "ml", - "size": "1g", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size": "29g", - "min_size": "1g", - }, - }, + }, + MlTier: &ElasticsearchTopology{ + id: "ml", + Size: ec.String("1g"), + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("29g"), + MinSize: ec.String("1g"), }, }, - }}, + }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.11.1", + useNodeRoles: true, }, - want: enrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1241,35 +1220,32 @@ func Test_expandEsResource(t *testing.T) { { name: "autoscaling enabled no dimension in template, default resource", args: args{ - dt: eceDefaultTpl(), - ess: []interface{}{map[string]interface{}{ - "autoscale": "true", - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size": "450g", - "min_size": "2g", - }, - }, + es: Elasticsearch{ + Autoscale: ec.String("true"), + RefId: 
ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HotTier: &ElasticsearchTopology{ + id: "hot_content", + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("450g"), + MinSize: ec.String("2g"), }, - map[string]interface{}{ - "id": "master", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size": "250g", - "min_size": "1g", - }, - }, + }, + MasterTier: &ElasticsearchTopology{ + id: "master", + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("250g"), + MinSize: ec.String("1g"), }, }, - }}, + }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-ece-3.0.0-default.json"), + templateID: "aws-io-optimized-v2", + version: "7.17.3", + useNodeRoles: true, }, - want: enrichWithEmptyTopologies(eceDefaultTpl(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(eceDefaultTpl(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1356,48 +1332,42 @@ func Test_expandEsResource(t *testing.T) { { name: "autoscaling enabled overriding the size and resources", args: args{ - dt: hotWarm7111Tpl(), - ess: []interface{}{map[string]interface{}{ - "autoscale": "true", - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size_resource": "storage", - "max_size": "450g", - }, - }, + es: Elasticsearch{ + Autoscale: ec.String("true"), + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HotTier: &ElasticsearchTopology{ + id: "hot_content", + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("450g"), + MaxSizeResource: ec.String("storage"), }, - 
map[string]interface{}{ - "id": "warm", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size_resource": "storage", - "max_size": "870g", - }, - }, + }, + WarmTier: &ElasticsearchTopology{ + id: "warm", + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("870g"), + MaxSizeResource: ec.String("storage"), }, - map[string]interface{}{ - "id": "cold", - "size": "4g", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size_resource": "storage", - "max_size": "1740g", - - "min_size_resource": "storage", - "min_size": "4g", - }, - }, + }, + ColdTier: &ElasticsearchTopology{ + id: "cold", + Size: ec.String("4g"), + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("1740g"), + MaxSizeResource: ec.String("storage"), + MinSizeResource: ec.String("storage"), + MinSize: ec.String("4g"), }, }, - }}, + }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.11.1", + useNodeRoles: true, }, - want: enrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1513,30 +1483,29 @@ func Test_expandEsResource(t *testing.T) { { name: "parses an ES resource with plugins", args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - "plugins": schema.NewSet(schema.HashString, []interface{}{ - "plugin", - }), - }}, - 
"topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Config: &ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + Plugins: []string{"plugin"}, + }, + HotTier: &ElasticsearchTopology{ + id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, }, }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1594,25 +1563,26 @@ func Test_expandEsResource(t *testing.T) { { name: "parses an ES resource with snapshot settings", args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "snapshot_source": []interface{}{map[string]interface{}{ - "snapshot_name": "__latest_success__", - "source_elasticsearch_cluster_id": mock.ValidClusterID, - }}, - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + SnapshotSource: &ElasticsearchSnapshotSource{ + SnapshotName: "__latest_success__", + SourceElasticsearchClusterId: 
mock.ValidClusterID, + }, + HotTier: &ElasticsearchTopology{ + id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, }, }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1667,26 +1637,23 @@ func Test_expandEsResource(t *testing.T) { { name: "parse autodetect configuration strategy", args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }}, - "strategy": []interface{}{map[string]interface{}{ - "type": "autodetect", - }}, - }, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HotTier: &ElasticsearchTopology{ + id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, + Strategy: ec.String("autodetect"), }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - - want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1742,26 +1709,23 @@ func Test_expandEsResource(t *testing.T) { { name: "parse grow_and_shrink configuration strategy", args: args{ - dt: 
tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }}, - "strategy": []interface{}{map[string]interface{}{ - "type": "grow_and_shrink", - }}, - }, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HotTier: &ElasticsearchTopology{ + id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, + Strategy: ec.String("grow_and_shrink"), }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - - want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1817,26 +1781,23 @@ func Test_expandEsResource(t *testing.T) { { name: "parse rolling_grow_and_shrink configuration strategy", args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }}, - "strategy": []interface{}{map[string]interface{}{ - "type": "rolling_grow_and_shrink", - }}, - }, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HotTier: &ElasticsearchTopology{ + id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, + Strategy: ec.String("rolling_grow_and_shrink"), }, + template: 
testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - - want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1892,26 +1853,24 @@ func Test_expandEsResource(t *testing.T) { { name: "parse rolling configuration strategy", args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }}, - "strategy": []interface{}{map[string]interface{}{ - "type": "rolling_all", - }}, - }, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HotTier: &ElasticsearchTopology{ + id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, + Strategy: ec.String("rolling_all"), }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1969,16 +1928,17 @@ func Test_expandEsResource(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := expandEsResources(tt.args.ess, tt.args.dt) - if err != nil { - var msg string - if tt.err != nil { - msg = tt.err.Error() - } - 
assert.EqualError(t, err, msg) - } + var elasticsearch types.Object + diags := tfsdk.ValueFrom(context.Background(), tt.args.es, ElasticsearchSchema().FrameworkType(), &elasticsearch) + assert.Nil(t, diags) - assert.Equal(t, tt.want, got) + got, diags := ElasticsearchPayload(context.Background(), elasticsearch, tt.args.template, tt.args.templateID, tt.args.version, tt.args.useNodeRoles, false) + if tt.diags != nil { + assert.Equal(t, tt.diags, diags) + } else { + assert.Nil(t, diags) + assert.Equal(t, tt.want, got) + } }) } } diff --git a/ec/ecresource/deploymentresource/elasticsearch_flatteners_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go similarity index 63% rename from ec/ecresource/deploymentresource/elasticsearch_flatteners_test.go rename to ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go index 2d0f0c992..3623c11e5 100644 --- a/ec/ecresource/deploymentresource/elasticsearch_flatteners_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go @@ -15,35 +15,38 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package v2 import ( + "context" "testing" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) -func Test_flattenEsResource(t *testing.T) { +func Test_readElasticsearch(t *testing.T) { type args struct { in []*models.ElasticsearchResourceInfo - name string remotes models.RemoteResources } tests := []struct { - name string - args args - want []interface{} - err string + name string + args args + want *Elasticsearch + diags diag.Diagnostics }{ { name: "empty resource list returns empty list", args: args{in: []*models.ElasticsearchResourceInfo{}}, - want: []interface{}{}, + want: nil, }, { name: "empty current plan returns empty list", @@ -56,7 +59,7 @@ func Test_flattenEsResource(t *testing.T) { }, }, }}, - want: []interface{}{}, + want: nil, }, { name: "parses an elasticsearch resource", @@ -148,29 +151,25 @@ func Test_flattenEsResource(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "cloud_id": "some CLOUD ID", - "http_endpoint": "http://somecluster.cloud.elastic.co:9200", - "https_endpoint": "https://somecluster.cloud.elastic.co:9243", - "config": func() []interface{} { return nil }(), - "topology": []interface{}{ - map[string]interface{}{ - "config": func() []interface{} { return nil }(), - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "size": "2g", - "size_resource": "memory", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "zone_count": int32(1), - }, - 
}, + want: &Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + CloudID: ec.String("some CLOUD ID"), + HttpEndpoint: ec.String("http://somecluster.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://somecluster.cloud.elastic.co:9243"), + Config: &ElasticsearchConfig{}, + HotTier: &ElasticsearchTopology{ + id: "hot_content", + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &ElasticsearchTopologyAutoscaling{}, }, }, }, @@ -227,56 +226,60 @@ func Test_flattenEsResource(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://othercluster.cloud.elastic.co:9200", - "https_endpoint": "https://othercluster.cloud.elastic.co:9243", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - }}, - "topology": []interface{}{map[string]interface{}{ - "config": func() []interface{} { return nil }(), - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "size": "2g", - "size_resource": "memory", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "zone_count": int32(1), - }}, - }}, + want: &Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://othercluster.cloud.elastic.co:9200"), + 
HttpsEndpoint: ec.String("https://othercluster.cloud.elastic.co:9243"), + Config: &ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + HotTier: &ElasticsearchTopology{ + id: "hot_content", + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &ElasticsearchTopologyAutoscaling{}, + }, + }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := flattenEsResources(tt.args.in, tt.args.name, tt.args.remotes) - if err != nil && !assert.EqualError(t, err, tt.err) { - t.Error(err) - } + got, err := ReadElasticsearches(tt.args.in, &tt.args.remotes) + assert.Nil(t, err) assert.Equal(t, tt.want, got) + + var esObj types.Object + diags := tfsdk.ValueFrom(context.Background(), got, ElasticsearchSchema().FrameworkType(), &esObj) + if tt.diags.HasError() { + assert.Equal(t, tt.diags, diags) + } }) } } -func Test_flattenEsTopology(t *testing.T) { +func Test_readElasticsearchTopology(t *testing.T) { type args struct { plan *models.ElasticsearchClusterPlan } tests := []struct { name string args args - want []interface{} + want ElasticsearchTopologies err string }{ { - name: "no zombie topologies", + name: "all topologies (even with 0 size) are returned", args: args{plan: &models.ElasticsearchClusterPlan{ ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ { @@ -302,17 +305,27 @@ func Test_flattenEsTopology(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "config": func() []interface{} { return nil }(), - "id": "hot_content", - "instance_configuration_id": 
"aws.data.highio.i3", - "size": "4g", - "size_resource": "memory", - "zone_count": int32(1), - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - }}, + want: ElasticsearchTopologies{ + { + id: "hot_content", + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + Autoscaling: &ElasticsearchTopologyAutoscaling{}, + }, + { + id: "coordinating", + InstanceConfigurationId: ec.String("aws.coordinating.m5"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + ZoneCount: 2, + Autoscaling: &ElasticsearchTopologyAutoscaling{}, + }, + }, }, { name: "includes unsized autoscaling topologies", @@ -348,32 +361,29 @@ func Test_flattenEsTopology(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "config": func() []interface{} { return nil }(), - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "size": "4g", - "size_resource": "memory", - "zone_count": int32(1), - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", + want: ElasticsearchTopologies{ + { + id: "hot_content", + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + Autoscaling: &ElasticsearchTopologyAutoscaling{}, }, - map[string]interface{}{ - "config": func() []interface{} { return nil }(), - "id": "ml", - "instance_configuration_id": "aws.ml.m5", - "size": "0g", - "size_resource": "memory", - "zone_count": int32(1), - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size": "8g", - "max_size_resource": "memory", - "min_size": "0g", - "min_size_resource": "memory", - }, + { + id: "ml", + 
InstanceConfigurationId: ec.String("aws.ml.m5"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("8g"), + MaxSizeResource: ec.String("memory"), + MinSize: ec.String("0g"), + MinSizeResource: ec.String("memory"), }, }, }, @@ -381,7 +391,7 @@ func Test_flattenEsTopology(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := flattenEsTopology(tt.args.plan) + got, err := ReadElasticsearchTopologies(tt.args.plan) if err != nil && !assert.EqualError(t, err, tt.err) { t.Error(err) } @@ -390,37 +400,34 @@ func Test_flattenEsTopology(t *testing.T) { } } -func Test_flattenEsConfig(t *testing.T) { +func Test_readElasticsearchConfig(t *testing.T) { type args struct { cfg *models.ElasticsearchConfiguration } tests := []struct { name string args args - want []interface{} + want *ElasticsearchConfig }{ { - name: "flattens plugins allowlist", + name: "read plugins allowlist", args: args{cfg: &models.ElasticsearchConfiguration{ EnabledBuiltInPlugins: []string{"some-allowed-plugin"}, }}, - want: []interface{}{map[string]interface{}{ - "plugins": []interface{}{"some-allowed-plugin"}, - }}, + want: &ElasticsearchConfig{ + Plugins: []string{"some-allowed-plugin"}, + }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenEsConfig(tt.args.cfg) - for _, g := range got { - var rawVal []interface{} - m := g.(map[string]interface{}) - if v, ok := m["plugins"]; ok { - rawVal = v.(*schema.Set).List() - } - m["plugins"] = rawVal - } + got, err := ReadElasticsearchConfig(tt.args.cfg) + assert.Nil(t, err) assert.Equal(t, tt.want, got) + + var config types.Object + diags := tfsdk.ValueFrom(context.Background(), got, ElasticsearchConfigSchema().FrameworkType(), &config) + assert.Nil(t, diags) }) } } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_remote_cluster.go 
b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_remote_cluster.go new file mode 100644 index 000000000..7bdfe2fe9 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_remote_cluster.go @@ -0,0 +1,118 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchRemoteClusterTF struct { + DeploymentId types.String `tfsdk:"deployment_id"` + Alias types.String `tfsdk:"alias"` + RefId types.String `tfsdk:"ref_id"` + SkipUnavailable types.Bool `tfsdk:"skip_unavailable"` +} + +type ElasticsearchRemoteCluster struct { + DeploymentId *string `tfsdk:"deployment_id"` + Alias *string `tfsdk:"alias"` + RefId *string `tfsdk:"ref_id"` + SkipUnavailable *bool `tfsdk:"skip_unavailable"` +} + +type ElasticsearchRemoteClusters []ElasticsearchRemoteCluster + +func ReadElasticsearchRemoteClusters(in []*models.RemoteResourceRef) (ElasticsearchRemoteClusters, error) { + if len(in) == 0 { + return nil, nil + } + + clusters := make(ElasticsearchRemoteClusters, 0, len(in)) + + for _, model := range in { + cluster, err := ReadElasticsearchRemoteCluster(model) + if err != nil { + return nil, err + } + // clusters[*cluster.DeploymentId] = *cluster + clusters = append(clusters, *cluster) + } + + return clusters, nil +} + +func ElasticsearchRemoteClustersPayload(ctx context.Context, clustersTF types.Set) (*models.RemoteResources, diag.Diagnostics) { + payloads := models.RemoteResources{Resources: []*models.RemoteResourceRef{}} + + for _, elem := range clustersTF.Elems { + var cluster ElasticsearchRemoteClusterTF + diags := tfsdk.ValueAs(ctx, elem, &cluster) + + if diags.HasError() { + return nil, diags + } + var payload models.RemoteResourceRef + + if !cluster.DeploymentId.IsNull() { + payload.DeploymentID = &cluster.DeploymentId.Value + } + + if !cluster.RefId.IsNull() { + payload.ElasticsearchRefID = &cluster.RefId.Value + } + + if !cluster.Alias.IsNull() { + payload.Alias = &cluster.Alias.Value + } + + if !cluster.SkipUnavailable.IsNull() { + payload.SkipUnavailable = 
&cluster.SkipUnavailable.Value + } + + payloads.Resources = append(payloads.Resources, &payload) + } + + return &payloads, nil +} + +func ReadElasticsearchRemoteCluster(in *models.RemoteResourceRef) (*ElasticsearchRemoteCluster, error) { + var cluster ElasticsearchRemoteCluster + + if in.DeploymentID != nil && *in.DeploymentID != "" { + cluster.DeploymentId = in.DeploymentID + } + + if in.ElasticsearchRefID != nil && *in.ElasticsearchRefID != "" { + cluster.RefId = in.ElasticsearchRefID + } + + if in.Alias != nil && *in.Alias != "" { + cluster.Alias = in.Alias + } + + if in.SkipUnavailable != nil { + cluster.SkipUnavailable = in.SkipUnavailable + } + + return &cluster, nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_snapshot_source.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_snapshot_source.go new file mode 100644 index 000000000..279979f98 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_snapshot_source.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/models" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +type ElasticsearchSnapshotSource v1.ElasticsearchSnapshotSource + +func elasticsearchSnapshotSourcePayload(ctx context.Context, srcObj attr.Value, payload *models.ElasticsearchClusterPlan) diag.Diagnostics { + var snapshot *v1.ElasticsearchSnapshotSourceTF + + if srcObj.IsNull() || srcObj.IsUnknown() { + return nil + } + + if diags := tfsdk.ValueAs(ctx, srcObj, &snapshot); diags.HasError() { + return diags + } + + if snapshot == nil { + return nil + } + + if payload.Transient == nil { + payload.Transient = &models.TransientElasticsearchPlanConfiguration{ + RestoreSnapshot: &models.RestoreSnapshotConfiguration{}, + } + } + + if !snapshot.SourceElasticsearchClusterId.IsNull() { + payload.Transient.RestoreSnapshot.SourceClusterID = snapshot.SourceElasticsearchClusterId.Value + } + + if !snapshot.SnapshotName.IsNull() { + payload.Transient.RestoreSnapshot.SnapshotName = &snapshot.SnapshotName.Value + } + + return nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go new file mode 100644 index 000000000..dba27a3cc --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go @@ -0,0 +1,394 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deploymentsize" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchTopologyTF struct { + InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + NodeTypeData types.String `tfsdk:"node_type_data"` + NodeTypeMaster types.String `tfsdk:"node_type_master"` + NodeTypeIngest types.String `tfsdk:"node_type_ingest"` + NodeTypeMl types.String `tfsdk:"node_type_ml"` + NodeRoles types.Set `tfsdk:"node_roles"` + Autoscaling types.Object `tfsdk:"autoscaling"` +} + +type ElasticsearchTopology struct { + id string + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + NodeTypeData *string 
`tfsdk:"node_type_data"` + NodeTypeMaster *string `tfsdk:"node_type_master"` + NodeTypeIngest *string `tfsdk:"node_type_ingest"` + NodeTypeMl *string `tfsdk:"node_type_ml"` + NodeRoles []string `tfsdk:"node_roles"` + Autoscaling *ElasticsearchTopologyAutoscaling `tfsdk:"autoscaling"` +} + +func CreateTierForTest(tierId string, tier ElasticsearchTopology) *ElasticsearchTopology { + res := tier + res.id = tierId + return &res +} + +type ElasticsearchTopologyAutoscaling v1.ElasticsearchTopologyAutoscaling + +func (topology ElasticsearchTopologyTF) Payload(ctx context.Context, topologyID string, planTopologies []*models.ElasticsearchClusterTopologyElement) diag.Diagnostics { + var diags diag.Diagnostics + + topologyElem, err := matchEsTopologyID(topologyID, planTopologies) + if err != nil { + diags.AddError("topology matching error", err.Error()) + return diags + } + + size, err := converters.ParseTopologySizeTF(topology.Size, topology.SizeResource) + if err != nil { + diags.AddError("size parsing error", err.Error()) + } + + if size != nil { + topologyElem.Size = size + } + + if topology.ZoneCount.Value > 0 { + topologyElem.ZoneCount = int32(topology.ZoneCount.Value) + } + + if err := topology.ParseLegacyNodeType(topologyElem.NodeType); err != nil { + diags.AddError("topology legacy node type error", err.Error()) + } + + var nodeRoles []string + ds := topology.NodeRoles.ElementsAs(ctx, &nodeRoles, true) + diags.Append(ds...) + + if !ds.HasError() && len(nodeRoles) > 0 { + topologyElem.NodeRoles = nodeRoles + topologyElem.NodeType = nil + } + + diags.Append(ElasticsearchTopologyAutoscalingPayload(ctx, topology.Autoscaling, topologyID, topologyElem)...) + + diags = append(diags, ds...) 
+ + return diags +} + +func ReadElasticsearchTopologies(in *models.ElasticsearchClusterPlan) (ElasticsearchTopologies, error) { + if len(in.ClusterTopology) == 0 { + return nil, nil + } + + tops := make([]ElasticsearchTopology, 0, len(in.ClusterTopology)) + + for _, model := range in.ClusterTopology { + // if !v1.IsPotentiallySizedTopology(model, in.AutoscalingEnabled != nil && *in.AutoscalingEnabled) { + // continue + // } + + topology, err := ReadElasticsearchTopology(model) + if err != nil { + return nil, err + } + tops = append(tops, *topology) + } + + return tops, nil +} + +func ReadElasticsearchTopology(model *models.ElasticsearchClusterTopologyElement) (*ElasticsearchTopology, error) { + var topology ElasticsearchTopology + + topology.id = model.ID + + if model.InstanceConfigurationID != "" { + topology.InstanceConfigurationId = &model.InstanceConfigurationID + } + + if model.Size != nil { + topology.Size = ec.String(util.MemoryToState(*model.Size.Value)) + topology.SizeResource = model.Size.Resource + } + + topology.ZoneCount = int(model.ZoneCount) + + if nt := model.NodeType; nt != nil { + if nt.Data != nil { + topology.NodeTypeData = ec.String(strconv.FormatBool(*nt.Data)) + } + + if nt.Ingest != nil { + topology.NodeTypeIngest = ec.String(strconv.FormatBool(*nt.Ingest)) + } + + if nt.Master != nil { + topology.NodeTypeMaster = ec.String(strconv.FormatBool(*nt.Master)) + } + + if nt.Ml != nil { + topology.NodeTypeMl = ec.String(strconv.FormatBool(*nt.Ml)) + } + } + + topology.NodeRoles = model.NodeRoles + + autoscaling, err := ReadElasticsearchTopologyAutoscaling(model) + if err != nil { + return nil, err + } + topology.Autoscaling = autoscaling + + return &topology, nil +} + +func ReadElasticsearchTopologyAutoscaling(topology *models.ElasticsearchClusterTopologyElement) (*ElasticsearchTopologyAutoscaling, error) { + var a ElasticsearchTopologyAutoscaling + + if ascale := topology.AutoscalingMax; ascale != nil { + a.MaxSizeResource = ascale.Resource + 
a.MaxSize = ec.String(util.MemoryToState(*ascale.Value)) + } + + if ascale := topology.AutoscalingMin; ascale != nil { + a.MinSizeResource = ascale.Resource + a.MinSize = ec.String(util.MemoryToState(*ascale.Value)) + } + + if topology.AutoscalingPolicyOverrideJSON != nil { + b, err := json.Marshal(topology.AutoscalingPolicyOverrideJSON) + if err != nil { + return nil, fmt.Errorf("elasticsearch topology %s: unable to persist policy_override_json - %w", topology.ID, err) + } + a.PolicyOverrideJson = ec.String(string(b)) + } + + return &a, nil +} + +func (topology *ElasticsearchTopologyTF) ParseLegacyNodeType(nodeType *models.ElasticsearchNodeType) error { + if nodeType == nil { + return nil + } + + if topology.NodeTypeData.Value != "" { + nt, err := strconv.ParseBool(topology.NodeTypeData.Value) + if err != nil { + return fmt.Errorf("failed parsing node_type_data value: %w", err) + } + nodeType.Data = &nt + } + + if topology.NodeTypeMaster.Value != "" { + nt, err := strconv.ParseBool(topology.NodeTypeMaster.Value) + if err != nil { + return fmt.Errorf("failed parsing node_type_master value: %w", err) + } + nodeType.Master = &nt + } + + if topology.NodeTypeIngest.Value != "" { + nt, err := strconv.ParseBool(topology.NodeTypeIngest.Value) + if err != nil { + return fmt.Errorf("failed parsing node_type_ingest value: %w", err) + } + nodeType.Ingest = &nt + } + + if topology.NodeTypeMl.Value != "" { + nt, err := strconv.ParseBool(topology.NodeTypeMl.Value) + if err != nil { + return fmt.Errorf("failed parsing node_type_ml value: %w", err) + } + nodeType.Ml = &nt + } + + return nil +} + +func (topology *ElasticsearchTopologyTF) HasNodeType() bool { + return topology.NodeTypeData.Value != "" || + topology.NodeTypeIngest.Value != "" || + topology.NodeTypeMaster.Value != "" || + topology.NodeTypeMl.Value != "" +} + +func ObjectToTopology(ctx context.Context, obj types.Object) (*ElasticsearchTopologyTF, diag.Diagnostics) { + if obj.IsNull() || obj.IsUnknown() { + return nil, 
nil + } + + var topology *ElasticsearchTopologyTF + + if diags := tfsdk.ValueAs(ctx, obj, &topology); diags.HasError() { + return nil, diags + } + + return topology, nil +} + +type ElasticsearchTopologies []ElasticsearchTopology + +func (tops ElasticsearchTopologies) Set() map[string]ElasticsearchTopology { + set := make(map[string]ElasticsearchTopology, len(tops)) + + for _, top := range tops { + set[top.id] = top + } + + return set +} + +func matchEsTopologyID(id string, topologies []*models.ElasticsearchClusterTopologyElement) (*models.ElasticsearchClusterTopologyElement, error) { + for _, t := range topologies { + if t.ID == id { + return t, nil + } + } + + topIDs := topologyIDs(topologies) + for i, id := range topIDs { + topIDs[i] = "\"" + id + "\"" + } + + return nil, fmt.Errorf(`invalid id ('%s'): valid topology IDs are %s`, id, strings.Join(topIDs, ", ")) +} + +func topologyIDs(topologies []*models.ElasticsearchClusterTopologyElement) []string { + var result []string + + for _, topology := range topologies { + result = append(result, topology.ID) + } + + if len(result) == 0 { + return nil + } + return result +} + +func ElasticsearchTopologyAutoscalingPayload(ctx context.Context, autoObj attr.Value, topologyID string, payload *models.ElasticsearchClusterTopologyElement) diag.Diagnostics { + var diag diag.Diagnostics + + if autoObj.IsNull() || autoObj.IsUnknown() { + return nil + } + + // it should be only one element if any + var autoscale v1.ElasticsearchTopologyAutoscalingTF + + if diags := tfsdk.ValueAs(ctx, autoObj, &autoscale); diags.HasError() { + return diags + } + + if autoscale == (v1.ElasticsearchTopologyAutoscalingTF{}) { + return nil + } + + if !autoscale.MinSize.IsNull() && !autoscale.MinSize.IsUnknown() { + if payload.AutoscalingMin == nil { + payload.AutoscalingMin = new(models.TopologySize) + } + + err := expandAutoscalingDimension(autoscale, payload.AutoscalingMin, autoscale.MinSize, autoscale.MinSizeResource) + if err != nil { + 
diag.AddError("fail to parse autoscale min size", err.Error()) + return diag + } + + if reflect.DeepEqual(payload.AutoscalingMin, new(models.TopologySize)) { + payload.AutoscalingMin = nil + } + } + + if !autoscale.MaxSize.IsNull() && !autoscale.MaxSize.IsUnknown() { + if payload.AutoscalingMax == nil { + payload.AutoscalingMax = new(models.TopologySize) + } + + err := expandAutoscalingDimension(autoscale, payload.AutoscalingMax, autoscale.MaxSize, autoscale.MaxSizeResource) + if err != nil { + diag.AddError("fail to parse autoscale max size", err.Error()) + return diag + } + + if reflect.DeepEqual(payload.AutoscalingMax, new(models.TopologySize)) { + payload.AutoscalingMax = nil + } + } + + if autoscale.PolicyOverrideJson.Value != "" { + if err := json.Unmarshal([]byte(autoscale.PolicyOverrideJson.Value), + &payload.AutoscalingPolicyOverrideJSON, + ); err != nil { + diag.AddError(fmt.Sprintf("elasticsearch topology %s: unable to load policy_override_json", topologyID), err.Error()) + return diag + } + } + + return diag +} + +// expandAutoscalingDimension centralises processing of %_size and %_size_resource attributes +// Due to limitations in the Terraform SDK, it's not possible to specify a Default on a Computed schema member +// to work around this limitation, this function will default the %_size_resource attribute to `memory`. +// Without this default, setting autoscaling limits on tiers which do not have those limits in the deployment +// template leads to an API error due to the empty resource field on the TopologySize model. 
+func expandAutoscalingDimension(autoscale v1.ElasticsearchTopologyAutoscalingTF, model *models.TopologySize, size, sizeResource types.String) error { + if size.Value != "" { + val, err := deploymentsize.ParseGb(size.Value) + if err != nil { + return err + } + model.Value = &val + + if model.Resource == nil { + model.Resource = ec.String("memory") + } + } + + if sizeResource.Value != "" { + model.Resource = &sizeResource.Value + } + + return nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_account.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_account.go new file mode 100644 index 000000000..4d06125b8 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_account.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "github.com/elastic/cloud-sdk-go/pkg/models" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1" +) + +type ElasticsearchTrustAccounts v1.ElasticsearchTrustAccounts + +func ReadElasticsearchTrustAccounts(in *models.ElasticsearchClusterSettings) (ElasticsearchTrustAccounts, error) { + if in == nil || in.Trust == nil { + return nil, nil + } + + accounts := make(ElasticsearchTrustAccounts, 0, len(in.Trust.Accounts)) + + for _, model := range in.Trust.Accounts { + account, err := ReadElasticsearchTrustAccount(model) + if err != nil { + return nil, err + } + accounts = append(accounts, *account) + } + + return accounts, nil +} + +func ReadElasticsearchTrustAccount(in *models.AccountTrustRelationship) (*v1.ElasticsearchTrustAccount, error) { + var acc v1.ElasticsearchTrustAccount + + if in.AccountID != nil { + acc.AccountId = in.AccountID + } + + if in.TrustAll != nil { + acc.TrustAll = in.TrustAll + } + + acc.TrustAllowlist = in.TrustAllowlist + + return &acc, nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_external.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_external.go new file mode 100644 index 000000000..a2f3d6691 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_external.go @@ -0,0 +1,168 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package v2
+
+import (
+	"context"
+
+	"github.com/elastic/cloud-sdk-go/pkg/models"
+	v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1"
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/tfsdk"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+type ElasticsearchTrustExternals v1.ElasticsearchTrustExternals
+
+// ReadElasticsearchTrustExternals converts the API cluster settings'
+// external trust relationships into their state representation.
+// Returns (nil, nil) when the settings carry no trust section.
+func ReadElasticsearchTrustExternals(in *models.ElasticsearchClusterSettings) (ElasticsearchTrustExternals, error) {
+	if in == nil || in.Trust == nil {
+		return nil, nil
+	}
+
+	externals := make(ElasticsearchTrustExternals, 0, len(in.Trust.External))
+
+	for _, model := range in.Trust.External {
+		external, err := ReadElasticsearchTrustExternal(model)
+		if err != nil {
+			return nil, err
+		}
+		externals = append(externals, *external)
+	}
+
+	return externals, nil
+}
+
+// ElasticsearchTrustExternalPayload appends the plan's `trust_external`
+// set elements to the cluster settings payload, initializing the settings
+// and trust containers as needed. Conversion diagnostics are accumulated
+// per element and returned to the caller.
+func ElasticsearchTrustExternalPayload(ctx context.Context, externals types.Set, model *models.ElasticsearchClusterSettings) (*models.ElasticsearchClusterSettings, diag.Diagnostics) {
+	var diags diag.Diagnostics
+
+	payloads := make([]*models.ExternalTrustRelationship, 0, len(externals.Elems))
+
+	for _, elem := range externals.Elems {
+		var external v1.ElasticsearchTrustExternalTF
+
+		ds := tfsdk.ValueAs(ctx, elem, &external)
+
+		diags = append(diags, ds...)
+
+		// Check the per-element diagnostics (ds), not the accumulated diags:
+		// checking diags here would skip every element after the first bad
+		// one. This also matches ElasticsearchTrustAccountPayload below.
+		if ds.HasError() {
+			continue
+		}
+
+		id := external.RelationshipId.Value
+		all := external.TrustAll.Value
+
+		payload := &models.ExternalTrustRelationship{
+			TrustRelationshipID: &id,
+			TrustAll:            &all,
+		}
+
+		ds = external.TrustAllowlist.ElementsAs(ctx, &payload.TrustAllowlist, true)
+
+		diags = append(diags, ds...)
+
+		if ds.HasError() {
+			continue
+		}
+
+		payloads = append(payloads, payload)
+	}
+
+	if len(payloads) == 0 {
+		// Return the accumulated diagnostics rather than nil so element
+		// conversion errors are not silently swallowed.
+		return model, diags
+	}
+
+	if model == nil {
+		model = &models.ElasticsearchClusterSettings{}
+	}
+
+	if model.Trust == nil {
+		model.Trust = &models.ElasticsearchClusterTrustSettings{}
+	}
+
+	model.Trust.External = append(model.Trust.External, payloads...)
+
+	return model, diags
+}
+
+// ReadElasticsearchTrustExternal converts a single API external trust
+// relationship into its state representation.
+func ReadElasticsearchTrustExternal(in *models.ExternalTrustRelationship) (*v1.ElasticsearchTrustExternal, error) {
+	var ext v1.ElasticsearchTrustExternal
+
+	if in.TrustRelationshipID != nil {
+		ext.RelationshipId = in.TrustRelationshipID
+	}
+
+	if in.TrustAll != nil {
+		ext.TrustAll = in.TrustAll
+	}
+
+	ext.TrustAllowlist = in.TrustAllowlist
+
+	return &ext, nil
+}
+
+// ElasticsearchTrustAccountPayload appends the plan's `trust_account`
+// set elements to the cluster settings payload, mirroring
+// ElasticsearchTrustExternalPayload above.
+func ElasticsearchTrustAccountPayload(ctx context.Context, accounts types.Set, model *models.ElasticsearchClusterSettings) (*models.ElasticsearchClusterSettings, diag.Diagnostics) {
+	var diags diag.Diagnostics
+
+	payloads := make([]*models.AccountTrustRelationship, 0, len(accounts.Elems))
+
+	for _, elem := range accounts.Elems {
+		var account v1.ElasticsearchTrustAccountTF
+
+		ds := tfsdk.ValueAs(ctx, elem, &account)
+
+		diags = append(diags, ds...)
+
+		if ds.HasError() {
+			continue
+		}
+
+		id := account.AccountId.Value
+		all := account.TrustAll.Value
+
+		payload := &models.AccountTrustRelationship{
+			AccountID: &id,
+			TrustAll:  &all,
+		}
+
+		ds = account.TrustAllowlist.ElementsAs(ctx, &payload.TrustAllowlist, true)
+
+		diags = append(diags, ds...)
+
+		if ds.HasError() {
+			continue
+		}
+
+		payloads = append(payloads, payload)
+	}
+
+	if len(payloads) == 0 {
+		// Return the accumulated diagnostics rather than nil so element
+		// conversion errors are not silently swallowed.
+		return model, diags
+	}
+
+	if model == nil {
+		model = &models.ElasticsearchClusterSettings{}
+	}
+
+	if model.Trust == nil {
+		model.Trust = &models.ElasticsearchClusterTrustSettings{}
+	}
+
+	model.Trust.Accounts = append(model.Trust.Accounts, payloads...)
+
+	return model, diags
+}
diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go
new file mode 100644
index 000000000..b66a0cbf4
--- /dev/null
+++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go
@@ -0,0 +1,106 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +package v2 + +import ( + "context" + + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Use `self` as value of `observability`'s `deployment_id` attribute +func UseNodeRolesDefault() tfsdk.AttributePlanModifier { + return nodeRolesDefault{} +} + +type nodeRolesDefault struct{} + +func (r nodeRolesDefault) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { + if req.AttributeState == nil || resp.AttributePlan == nil || req.AttributeConfig == nil { + return + } + + if !resp.AttributePlan.IsUnknown() { + return + } + + // if the config is the unknown value, use the unknown value otherwise, interpolation gets messed up + if req.AttributeConfig.IsUnknown() { + return + } + + // if there is no state for "version" return + var stateVersion types.String + + if diags := req.State.GetAttribute(ctx, path.Root("version"), &stateVersion); diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + + if stateVersion.IsNull() { + return + } + + // if template changed return + templateChanged, diags := isAttributeChanged(ctx, path.Root("deployment_template_id"), req) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + if templateChanged { + return + } + + // get version for plan and state and calculate useNodeRoles + + var planVersion types.String + + if diags := req.Plan.GetAttribute(ctx, path.Root("version"), &planVersion); diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + + useNodeRoles, diags := utils.UseNodeRoles(stateVersion, planVersion) + + if diags.HasError() { + resp.Diagnostics.Append(diags...) 
+ return + } + + if useNodeRoles && req.AttributeState.IsNull() { + return + } + + resp.AttributePlan = req.AttributeState +} + +// Description returns a human-readable description of the plan modifier. +func (r nodeRolesDefault) Description(ctx context.Context) string { + return "Use current state if it's still valid." +} + +// MarkdownDescription returns a markdown description of the plan modifier. +func (r nodeRolesDefault) MarkdownDescription(ctx context.Context) string { + return "Use current state if it's still valid." +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go new file mode 100644 index 000000000..a511428f8 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Use `self` as value of `observability`'s `deployment_id` attribute +func UseNodeTypesDefault() tfsdk.AttributePlanModifier { + return nodeTypesDefault{} +} + +type nodeTypesDefault struct{} + +func (r nodeTypesDefault) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { + if req.AttributeState == nil || resp.AttributePlan == nil || req.AttributeConfig == nil { + return + } + + if !resp.AttributePlan.IsUnknown() { + return + } + + // if the config is the unknown value, use the unknown value otherwise, interpolation gets messed up + if req.AttributeConfig.IsUnknown() { + return + } + + // if there is no state for "version" return + var stateVersion types.String + + if diags := req.State.GetAttribute(ctx, path.Root("version"), &stateVersion); diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + + if stateVersion.IsNull() { + return + } + + // if template changed return + templateChanged, diags := isAttributeChanged(ctx, path.Root("deployment_template_id"), req) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + if templateChanged { + return + } + + // get version for plan and state and calculate useNodeRoles + + var planVersion types.String + + if diags := req.Plan.GetAttribute(ctx, path.Root("version"), &planVersion); diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + + useNodeRoles, diags := utils.UseNodeRoles(stateVersion, planVersion) + + if diags.HasError() { + resp.Diagnostics.Append(diags...) 
+ return + } + + if useNodeRoles && !req.AttributeState.IsNull() { + return + } + + resp.AttributePlan = req.AttributeState +} + +// Description returns a human-readable description of the plan modifier. +func (r nodeTypesDefault) Description(ctx context.Context) string { + return "Use current state if it's still valid." +} + +// MarkdownDescription returns a markdown description of the plan modifier. +func (r nodeTypesDefault) MarkdownDescription(ctx context.Context) string { + return "Use current state if it's still valid." +} + +func isAttributeChanged(ctx context.Context, p path.Path, req tfsdk.ModifyAttributePlanRequest) (bool, diag.Diagnostics) { + var planValue attr.Value + + if diags := req.Plan.GetAttribute(ctx, p, &planValue); diags.HasError() { + return false, diags + } + + var stateValue attr.Value + + if diags := req.State.GetAttribute(ctx, p, &stateValue); diags.HasError() { + return false, diags + } + + return !planValue.Equal(stateValue), nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go new file mode 100644 index 000000000..251e8bed8 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -0,0 +1,479 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "strings" + + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/elastic/terraform-provider-ec/ec/internal/validators" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// These constants are only used to determine whether or not a dedicated +// tier of masters or ingest (coordinating) nodes are set. +const ( + dataTierRolePrefix = "data_" + ingestDataTierRole = "ingest" + masterDataTierRole = "master" + autodetect = "autodetect" + growAndShrink = "grow_and_shrink" + rollingGrowAndShrink = "rolling_grow_and_shrink" + rollingAll = "rolling_all" +) + +// List of update strategies availables. +var strategiesList = []string{ + autodetect, growAndShrink, rollingGrowAndShrink, rollingAll, +} + +func ElasticsearchSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Required Elasticsearch resource definition", + Required: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "autoscale": { + Type: types.StringType, + Description: `Enable or disable autoscaling. Defaults to the setting coming from the deployment template. 
Accepted values are "true" or "false".`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "ref_id": { + Type: types.StringType, + Description: "Optional ref_id to set on the Elasticsearch resource", + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + resource.UseStateForUnknown(), + }, + }, + "resource_id": { + Type: types.StringType, + Description: "The Elasticsearch resource unique identifier", + Computed: true, + }, + "region": { + Type: types.StringType, + Description: "The Elasticsearch resource region", + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "cloud_id": { + Type: types.StringType, + Description: "The encoded Elasticsearch credentials to use in Beats or Logstash", + Computed: true, + }, + "http_endpoint": { + Type: types.StringType, + Description: "The Elasticsearch resource HTTP endpoint", + Computed: true, + }, + "https_endpoint": { + Type: types.StringType, + Description: "The Elasticsearch resource HTTPs endpoint", + Computed: true, + }, + + "hot": ElasticsearchTierSchema("'hot' optional topology element", true, "hot"), + "coordinating": ElasticsearchTierSchema("'coordinating' optional topology element", false, "coordinating"), + "master": ElasticsearchTierSchema("'master' optional topology element", false, "master"), + "warm": ElasticsearchTierSchema("'warm' optional topology element", false, "warm"), + "cold": ElasticsearchTierSchema("'cold' optional topology element", false, "cold"), + "frozen": ElasticsearchTierSchema("'frozen' optional topology element", false, "frozen"), + "ml": ElasticsearchTierSchema("'ml' optional topology element", false, "ml"), + + "trust_account": ElasticsearchTrustAccountSchema(), + + "trust_external": ElasticsearchTrustExternalSchema(), + + "config": 
ElasticsearchConfigSchema(), + + "remote_cluster": ElasticsearchRemoteClusterSchema(), + + "snapshot_source": ElasticsearchSnapshotSourceSchema(), + + "extension": ElasticsearchExtensionSchema(), + + "strategy": { + Description: "Configuration strategy type " + strings.Join(strategiesList, ", "), + Type: types.StringType, + Optional: true, + Validators: []tfsdk.AttributeValidator{validators.OneOf(strategiesList)}, + }, + }), + } +} + +func ElasticsearchConfigSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: `Optional Elasticsearch settings which will be applied to all topologies unless overridden on the topology element`, + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + // TODO + // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the Elasticsearch nodes will use. Note that this field will only work for internal users only.", + Optional: true, + }, + "plugins": { + Type: types.SetType{ + ElemType: types.StringType, + }, + Description: "List of Elasticsearch supported plugins, which vary from version to version. Check the Stack Pack version to see which plugins are supported for each version. 
This is currently only available from the UI and [ecctl](https://www.elastic.co/guide/en/ecctl/master/ecctl_stack_list.html)", + Optional: true, + Computed: true, + }, + "user_settings_json": { + Type: types.StringType, + Description: `JSON-formatted user level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `JSON-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `YAML-formatted user level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `YAML-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + }), + } +} + +func ElasticsearchTopologyAutoscalingSchema(tierName string) tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources.", + // Optional: true, + // Computed: true, + Required: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "max_size_resource": { + Description: "Maximum resource type for the maximum autoscaling setting.", + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + UseTierStateForUnknown(tierName), + }, + }, + "max_size": { + Description: "Maximum size value for the maximum autoscaling setting.", + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + UseTierStateForUnknown(tierName), + }, + }, + "min_size_resource": { + Description: "Minimum resource type for the minimum autoscaling setting.", + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + UseTierStateForUnknown(tierName), + }, + }, + "min_size": { + Description: "Minimum size 
value for the minimum autoscaling setting.", + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + UseTierStateForUnknown(tierName), + }, + }, + "policy_override_json": { + Type: types.StringType, + Description: "Computed policy overrides set directly via the API or other clients.", + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + UseTierStateForUnknown(tierName), + }, + }, + }), + } +} + +func ElasticsearchRemoteClusterSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch remote clusters to configure for the Elasticsearch resource, can be set multiple times", + Optional: true, + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "deployment_id": { + Description: "Remote deployment ID", + Type: types.StringType, + // TODO fix examples/deployment_css/deployment.tf#61 + // Validators: []tfsdk.AttributeValidator{validators.Length(32, 32)}, + Required: true, + }, + "alias": { + Description: "Alias for this Cross Cluster Search binding", + Type: types.StringType, + // TODO fix examples/deployment_css/deployment.tf#62 + // Validators: []tfsdk.AttributeValidator{validators.NotEmpty()}, + Required: true, + }, + "ref_id": { + Description: `Remote elasticsearch "ref_id", it is best left to the default value`, + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + resource.UseStateForUnknown(), + }, + Optional: true, + }, + "skip_unavailable": { + Description: "If true, skip the cluster during search when disconnected", + Type: types.BoolType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: false}), + }, + Computed: true, + Optional: true, + }, + }), + } +} + +func ElasticsearchSnapshotSourceSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional snapshot source 
settings. Restore data from a snapshot of another deployment.", + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "source_elasticsearch_cluster_id": { + Description: "ID of the Elasticsearch cluster that will be used as the source of the snapshot", + Type: types.StringType, + Required: true, + }, + "snapshot_name": { + Description: "Name of the snapshot to restore. Use '__latest_success__' to get the most recent successful snapshot.", + Type: types.StringType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "__latest_success__"}), + resource.UseStateForUnknown(), + }, + Optional: true, + Computed: true, + }, + }), + } +} + +func ElasticsearchExtensionSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch extensions such as custom bundles or plugins.", + Optional: true, + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "name": { + Description: "Extension name.", + Type: types.StringType, + Required: true, + }, + "type": { + Description: "Extension type, only `bundle` or `plugin` are supported.", + Type: types.StringType, + Required: true, + Validators: []tfsdk.AttributeValidator{validators.OneOf([]string{`"bundle"`, `"plugin"`})}, + }, + "version": { + Description: "Elasticsearch compatibility version. 
Bundles should specify major or minor versions with wildcards, such as `7.*` or `*` but **plugins must use full version notation down to the patch level**, such as `7.10.1` and wildcards are not allowed.", + Type: types.StringType, + Required: true, + }, + "url": { + Description: "Bundle or plugin URL, the extension URL can be obtained from the `ec_deployment_extension..url` attribute or the API and cannot be a random HTTP address that is hosted elsewhere.", + Type: types.StringType, + Required: true, + }, + }), + } +} + +func ElasticsearchTrustAccountSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch account trust settings.", + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "account_id": { + Description: "The ID of the Account.", + Type: types.StringType, + Required: true, + }, + "trust_all": { + Description: "If true, all clusters in this account will by default be trusted and the `trust_allowlist` is ignored.", + Type: types.BoolType, + Required: true, + }, + "trust_allowlist": { + Description: "The list of clusters to trust. Only used when `trust_all` is false.", + Type: types.SetType{ + ElemType: types.StringType, + }, + Optional: true, + }, + }), + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + } +} + +func ElasticsearchTrustExternalSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch external trust settings.", + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "relationship_id": { + Description: "The ID of the external trust relationship.", + Type: types.StringType, + Required: true, + }, + "trust_all": { + Description: "If true, all clusters in this account will by default be trusted and the `trust_allowlist` is ignored.", + Type: types.BoolType, + Required: true, + }, + "trust_allowlist": { + Description: "The list of clusters to trust. 
Only used when `trust_all` is false.", + Type: types.SetType{ + ElemType: types.StringType, + }, + Optional: true, + }, + }), + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + } +} + +func ElasticsearchTierSchema(description string, required bool, tierName string) tfsdk.Attribute { + return tfsdk.Attribute{ + Optional: !required, + // it should be Computed but Computed triggers TF weird behaviour that leads to unempty plan for zero change config + // Computed: true, + Required: required, + Description: description, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Description: `Computed Instance Configuration ID of the topology element`, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTierStateForUnknown(tierName), + }, + }, + "size": { + Type: types.StringType, + Description: `Optional amount of memory per node in the "g" notation`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTierStateForUnknown(tierName), + }, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + UseTierStateForUnknown(tierName), + }, + }, + "zone_count": { + Type: types.Int64Type, + Description: `Optional number of zones that the Elasticsearch cluster will span. 
This is used to set HA`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + UseTierStateForUnknown(tierName), + }, + }, + "node_type_data": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (data node)`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseNodeTypesDefault(), + }, + }, + "node_type_master": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (master node)`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseNodeTypesDefault(), + }, + }, + "node_type_ingest": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (ingest node)`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseNodeTypesDefault(), + }, + }, + "node_type_ml": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (machine learning node)`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseNodeTypesDefault(), + }, + }, + "node_roles": { + Type: types.SetType{ + ElemType: types.StringType, + }, + Description: `The computed list of node roles for the current topology element`, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseNodeRolesDefault(), + }, + }, + "autoscaling": ElasticsearchTopologyAutoscalingSchema(tierName), + }), + } +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go new file mode 100644 index 000000000..f8391a78c --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go @@ -0,0 +1,112 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +func UseTierStateForUnknown(tier string) tfsdk.AttributePlanModifier { + return useTierState{tier: tier} +} + +type useTierState struct { + tier string +} + +func (m useTierState) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { + if req.AttributeState == nil || resp.AttributePlan == nil || req.AttributeConfig == nil { + return + } + + if !resp.AttributePlan.IsUnknown() { + return + } + + // if the config is the unknown value, use the unknown value otherwise, interpolation gets messed up + if req.AttributeConfig.IsUnknown() { + return + } + + // we check tier's state instead of tier attribute's state because nil can be a valid state + // e.g. `aws-io-optimized-v2` template doesn't specify `autoscaling_min` for `hot_content` so `min_size` state is nil + tierStateDefined, diags := attributeStateDefined(ctx, path.Root("elasticsearch").AtName(m.tier), req) + + resp.Diagnostics.Append(diags...) 
+ + if diags.HasError() { + return + } + + if !tierStateDefined { + return + } + + templateChanged, diags := attributeChanged(ctx, path.Root("deployment_template_id"), req) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + if templateChanged { + return + } + + resp.AttributePlan = req.AttributeState +} + +func (r useTierState) Description(ctx context.Context) string { + return "Use tier's state if it's defined and template is the same." +} + +func (r useTierState) MarkdownDescription(ctx context.Context) string { + return "Use tier's state if it's defined and template is the same." +} + +func attributeChanged(ctx context.Context, p path.Path, req tfsdk.ModifyAttributePlanRequest) (bool, diag.Diagnostics) { + var planValue attr.Value + + if diags := req.Plan.GetAttribute(ctx, p, &planValue); diags.HasError() { + return false, diags + } + + var stateValue attr.Value + + if diags := req.State.GetAttribute(ctx, p, &stateValue); diags.HasError() { + return false, diags + } + + return !planValue.Equal(stateValue), nil +} + +func attributeStateDefined(ctx context.Context, p path.Path, req tfsdk.ModifyAttributePlanRequest) (bool, diag.Diagnostics) { + var val attr.Value + + if diags := req.State.GetAttribute(ctx, p, &val); diags.HasError() { + return false, diags + } + + return !val.IsNull() && !val.IsUnknown(), nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch_expanders.go b/ec/ecresource/deploymentresource/elasticsearch_expanders.go deleted file mode 100644 index 6b3854042..000000000 --- a/ec/ecresource/deploymentresource/elasticsearch_expanders.go +++ /dev/null @@ -1,655 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deploymentsize" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// These constants are only used to determine whether or not a dedicated -// tier of masters or ingest (coordinating) nodes are set. -const ( - dataTierRolePrefix = "data_" - ingestDataTierRole = "ingest" - masterDataTierRole = "master" - autodetect = "autodetect" - growAndShrink = "grow_and_shrink" - rollingGrowAndShrink = "rolling_grow_and_shrink" - rollingAll = "rolling_all" -) - -// List of update strategies availables. 
-var strategiesList = []string{ - autodetect, growAndShrink, rollingGrowAndShrink, rollingAll, -} - -// expandEsResources expands Elasticsearch resources -func expandEsResources(ess []interface{}, tpl *models.ElasticsearchPayload) ([]*models.ElasticsearchPayload, error) { - if len(ess) == 0 { - return nil, nil - } - - result := make([]*models.ElasticsearchPayload, 0, len(ess)) - for _, raw := range ess { - resResource, err := expandEsResource(raw, tpl) - if err != nil { - return nil, err - } - result = append(result, resResource) - } - - return result, nil -} - -// expandEsResource expands a single Elasticsearch resource -func expandEsResource(raw interface{}, res *models.ElasticsearchPayload) (*models.ElasticsearchPayload, error) { - es := raw.(map[string]interface{}) - - if refID, ok := es["ref_id"]; ok { - res.RefID = ec.String(refID.(string)) - } - - if region, ok := es["region"]; ok { - if r := region.(string); r != "" { - res.Region = ec.String(r) - } - } - - // Unsetting the curation properties is since they're deprecated since - // >= 6.6.0 which is when ILM is introduced in Elasticsearch. - unsetElasticsearchCuration(res) - - if rt, ok := es["topology"]; ok && len(rt.([]interface{})) > 0 { - topology, err := expandEsTopology(rt, res.Plan.ClusterTopology) - if err != nil { - return nil, err - } - res.Plan.ClusterTopology = topology - } - - // Fixes the node_roles field to remove the dedicated tier roles from the - // list when these are set as a dedicated tier as a topology element. 
- updateNodeRolesOnDedicatedTiers(res.Plan.ClusterTopology) - - if cfg, ok := es["config"]; ok { - if err := expandEsConfig(cfg, res.Plan.Elasticsearch); err != nil { - return nil, err - } - } - - if snap, ok := es["snapshot_source"]; ok && len(snap.([]interface{})) > 0 { - res.Plan.Transient = &models.TransientElasticsearchPlanConfiguration{ - RestoreSnapshot: &models.RestoreSnapshotConfiguration{}, - } - expandSnapshotSource(snap, res.Plan.Transient.RestoreSnapshot) - } - - if ext, ok := es["extension"]; ok { - if e := ext.(*schema.Set); e.Len() > 0 { - expandEsExtension(e.List(), res.Plan.Elasticsearch) - } - } - - if auto := es["autoscale"]; auto != nil { - if autoscale := auto.(string); autoscale != "" { - autoscaleBool, err := strconv.ParseBool(autoscale) - if err != nil { - return nil, fmt.Errorf("failed parsing autoscale value: %w", err) - } - res.Plan.AutoscalingEnabled = &autoscaleBool - } - } - - if trust, ok := es["trust_account"]; ok { - if t := trust.(*schema.Set); t.Len() > 0 { - if res.Settings == nil { - res.Settings = &models.ElasticsearchClusterSettings{} - } - expandAccountTrust(t.List(), res.Settings) - } - } - - if trust, ok := es["trust_external"]; ok { - if t := trust.(*schema.Set); t.Len() > 0 { - if res.Settings == nil { - res.Settings = &models.ElasticsearchClusterSettings{} - } - expandExternalTrust(t.List(), res.Settings) - } - } - - if strategy, ok := es["strategy"]; ok { - if s := strategy.([]interface{}); len(s) > 0 { - if res.Plan.Transient == nil { - res.Plan.Transient = &models.TransientElasticsearchPlanConfiguration{ - Strategy: &models.PlanStrategy{}, - } - } - expandStrategy(s, res.Plan.Transient.Strategy) - } - } - - return res, nil -} - -// expandStrategy expands the Configuration Strategy. 
-func expandStrategy(raw interface{}, strategy *models.PlanStrategy) { - for _, rawStrategy := range raw.([]interface{}) { - strategyCfg, ok := rawStrategy.(map[string]interface{}) - if !ok { - continue - } - rawValue := strategyCfg["type"].(string) - if rawValue == autodetect { - strategy.Autodetect = new(models.AutodetectStrategyConfig) - } else if rawValue == growAndShrink { - strategy.GrowAndShrink = new(models.GrowShrinkStrategyConfig) - } else if rawValue == rollingGrowAndShrink { - strategy.RollingGrowAndShrink = new(models.RollingGrowShrinkStrategyConfig) - } else if rawValue == rollingAll { - strategy.Rolling = &models.RollingStrategyConfig{ - GroupBy: "__all__", - } - } - } -} - -// expandEsTopology expands a flattened topology -func expandEsTopology(raw interface{}, topologies []*models.ElasticsearchClusterTopologyElement) ([]*models.ElasticsearchClusterTopologyElement, error) { - rawTopologies := raw.([]interface{}) - res := topologies - - for _, rawTop := range rawTopologies { - topology := rawTop.(map[string]interface{}) - - var topologyID string - if id, ok := topology["id"]; ok { - topologyID = id.(string) - } - - size, err := util.ParseTopologySize(topology) - if err != nil { - return nil, err - } - - elem, err := matchEsTopologyID(topologyID, topologies) - if err != nil { - return nil, fmt.Errorf("elasticsearch topology %s: %w", topologyID, err) - } - if size != nil { - elem.Size = size - } - - if zones, ok := topology["zone_count"]; ok { - if z := zones.(int); z > 0 { - elem.ZoneCount = int32(z) - } - } - - if err := parseLegacyNodeType(topology, elem.NodeType); err != nil { - return nil, err - } - - if nr, ok := topology["node_roles"]; ok { - if nrSet, ok := nr.(*schema.Set); ok && nrSet.Len() > 0 { - elem.NodeRoles = util.ItemsToString(nrSet.List()) - elem.NodeType = nil - } - } - - if autoscalingRaw := topology["autoscaling"]; autoscalingRaw != nil { - for _, autoscaleRaw := range autoscalingRaw.([]interface{}) { - autoscale := 
autoscaleRaw.(map[string]interface{}) - - if elem.AutoscalingMax == nil { - elem.AutoscalingMax = new(models.TopologySize) - } - - if elem.AutoscalingMin == nil { - elem.AutoscalingMin = new(models.TopologySize) - } - - err := expandAutoscalingDimension(autoscale, elem.AutoscalingMax, "max") - if err != nil { - return nil, err - } - - err = expandAutoscalingDimension(autoscale, elem.AutoscalingMin, "min") - if err != nil { - return nil, err - } - - // Ensure that if the Min and Max are empty, they're nil. - if reflect.DeepEqual(elem.AutoscalingMin, new(models.TopologySize)) { - elem.AutoscalingMin = nil - } - if reflect.DeepEqual(elem.AutoscalingMax, new(models.TopologySize)) { - elem.AutoscalingMax = nil - } - - if policy := autoscale["policy_override_json"]; policy != nil { - if policyString := policy.(string); policyString != "" { - if err := json.Unmarshal([]byte(policyString), - &elem.AutoscalingPolicyOverrideJSON, - ); err != nil { - return nil, fmt.Errorf( - "elasticsearch topology %s: unable to load policy_override_json: %w", - topologyID, err, - ) - } - } - } - } - } - - if cfg, ok := topology["config"]; ok { - if elem.Elasticsearch == nil { - elem.Elasticsearch = &models.ElasticsearchConfiguration{} - } - if err := expandEsConfig(cfg, elem.Elasticsearch); err != nil { - return nil, err - } - } - } - - return res, nil -} - -// expandAutoscalingDimension centralises processing of %_size and %_size_resource attributes -// Due to limitations in the Terraform SDK, it's not possible to specify a Default on a Computed schema member -// to work around this limitation, this function will default the %_size_resource attribute to `memory`. -// Without this default, setting autoscaling limits on tiers which do not have those limits in the deployment -// template leads to an API error due to the empty resource field on the TopologySize model. 
-func expandAutoscalingDimension(autoscale map[string]interface{}, model *models.TopologySize, dimension string) error { - sizeAttribute := fmt.Sprintf("%s_size", dimension) - resourceAttribute := fmt.Sprintf("%s_size_resource", dimension) - - if size := autoscale[sizeAttribute]; size != nil { - if size := size.(string); size != "" { - val, err := deploymentsize.ParseGb(size) - if err != nil { - return err - } - model.Value = &val - - if model.Resource == nil { - model.Resource = ec.String("memory") - } - } - } - - if sizeResource := autoscale[resourceAttribute]; sizeResource != nil { - if sizeResource := sizeResource.(string); sizeResource != "" { - model.Resource = ec.String(sizeResource) - } - } - - return nil -} - -func expandEsConfig(raw interface{}, esCfg *models.ElasticsearchConfiguration) error { - for _, rawCfg := range raw.([]interface{}) { - cfg, ok := rawCfg.(map[string]interface{}) - if !ok { - continue - } - if settings, ok := cfg["user_settings_json"]; ok && settings != nil { - if s, ok := settings.(string); ok && s != "" { - if err := json.Unmarshal([]byte(s), &esCfg.UserSettingsJSON); err != nil { - return fmt.Errorf( - "failed expanding elasticsearch user_settings_json: %w", err, - ) - } - } - } - if settings, ok := cfg["user_settings_override_json"]; ok && settings != nil { - if s, ok := settings.(string); ok && s != "" { - if err := json.Unmarshal([]byte(s), &esCfg.UserSettingsOverrideJSON); err != nil { - return fmt.Errorf( - "failed expanding elasticsearch user_settings_override_json: %w", err, - ) - } - } - } - if settings, ok := cfg["user_settings_yaml"]; ok { - esCfg.UserSettingsYaml = settings.(string) - } - if settings, ok := cfg["user_settings_override_yaml"]; ok { - esCfg.UserSettingsOverrideYaml = settings.(string) - } - - if v, ok := cfg["plugins"]; ok { - esCfg.EnabledBuiltInPlugins = util.ItemsToString(v.(*schema.Set).List()) - } - - if v, ok := cfg["docker_image"]; ok { - esCfg.DockerImage = v.(string) - } - } - - return nil -} - 
-func expandSnapshotSource(raw interface{}, restore *models.RestoreSnapshotConfiguration) { - for _, rawRestore := range raw.([]interface{}) { - var rs = rawRestore.(map[string]interface{}) - if clusterID, ok := rs["source_elasticsearch_cluster_id"]; ok { - restore.SourceClusterID = clusterID.(string) - } - - if snapshotName, ok := rs["snapshot_name"]; ok { - restore.SnapshotName = ec.String(snapshotName.(string)) - } - } -} - -func matchEsTopologyID(id string, topologies []*models.ElasticsearchClusterTopologyElement) (*models.ElasticsearchClusterTopologyElement, error) { - for _, t := range topologies { - if t.ID == id { - return t, nil - } - } - - topIDs := topologyIDs(topologies) - for i, id := range topIDs { - topIDs[i] = "\"" + id + "\"" - } - - return nil, fmt.Errorf(`invalid id: valid topology IDs are %s`, - strings.Join(topIDs, ", "), - ) -} - -// esResource returns the ElaticsearchPayload from a deployment -// template or an empty version of the payload. -func esResource(res *models.DeploymentTemplateInfoV2) *models.ElasticsearchPayload { - if len(res.DeploymentTemplate.Resources.Elasticsearch) == 0 { - return &models.ElasticsearchPayload{ - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{}, - }, - Settings: &models.ElasticsearchClusterSettings{}, - } - } - return res.DeploymentTemplate.Resources.Elasticsearch[0] -} - -func unsetElasticsearchCuration(payload *models.ElasticsearchPayload) { - if payload.Plan.Elasticsearch != nil { - payload.Plan.Elasticsearch.Curation = nil - } - - if payload.Settings != nil { - payload.Settings.Curation = nil - } -} - -func topologyIDs(topologies []*models.ElasticsearchClusterTopologyElement) []string { - var result []string - - for _, topology := range topologies { - result = append(result, topology.ID) - } - - if len(result) == 0 { - return nil - } - return result -} - -func parseLegacyNodeType(topology map[string]interface{}, nodeType *models.ElasticsearchNodeType) error { - 
if nodeType == nil { - return nil - } - - if ntData, ok := topology["node_type_data"]; ok && ntData.(string) != "" { - nt, err := strconv.ParseBool(ntData.(string)) - if err != nil { - return fmt.Errorf("failed parsing node_type_data value: %w", err) - } - nodeType.Data = ec.Bool(nt) - } - - if ntMaster, ok := topology["node_type_master"]; ok && ntMaster.(string) != "" { - nt, err := strconv.ParseBool(ntMaster.(string)) - if err != nil { - return fmt.Errorf("failed parsing node_type_master value: %w", err) - } - nodeType.Master = ec.Bool(nt) - } - - if ntIngest, ok := topology["node_type_ingest"]; ok && ntIngest.(string) != "" { - nt, err := strconv.ParseBool(ntIngest.(string)) - if err != nil { - return fmt.Errorf("failed parsing node_type_ingest value: %w", err) - } - nodeType.Ingest = ec.Bool(nt) - } - - if ntMl, ok := topology["node_type_ml"]; ok && ntMl.(string) != "" { - nt, err := strconv.ParseBool(ntMl.(string)) - if err != nil { - return fmt.Errorf("failed parsing node_type_ml value: %w", err) - } - nodeType.Ml = ec.Bool(nt) - } - - return nil -} - -func updateNodeRolesOnDedicatedTiers(topologies []*models.ElasticsearchClusterTopologyElement) { - dataTier, hasMasterTier, hasIngestTier := dedicatedTopoogies(topologies) - // This case is not very likely since all deployments will have a data tier. - // It's here because the code path is technically possible and it's better - // than a straight panic. 
- if dataTier == nil { - return - } - - if hasIngestTier { - dataTier.NodeRoles = removeItemFromSlice( - dataTier.NodeRoles, ingestDataTierRole, - ) - } - if hasMasterTier { - dataTier.NodeRoles = removeItemFromSlice( - dataTier.NodeRoles, masterDataTierRole, - ) - } -} - -func dedicatedTopoogies(topologies []*models.ElasticsearchClusterTopologyElement) (dataTier *models.ElasticsearchClusterTopologyElement, hasMasterTier, hasIngestTier bool) { - for _, topology := range topologies { - var hasSomeDataRole bool - var hasMasterRole bool - var hasIngestRole bool - for _, role := range topology.NodeRoles { - sizeNonZero := *topology.Size.Value > 0 - if strings.HasPrefix(role, dataTierRolePrefix) && sizeNonZero { - hasSomeDataRole = true - } - if role == ingestDataTierRole && sizeNonZero { - hasIngestRole = true - } - if role == masterDataTierRole && sizeNonZero { - hasMasterRole = true - } - } - - if !hasSomeDataRole && hasMasterRole { - hasMasterTier = true - } - - if !hasSomeDataRole && hasIngestRole { - hasIngestTier = true - } - - if hasSomeDataRole && hasMasterRole { - dataTier = topology - } - } - - return dataTier, hasMasterTier, hasIngestTier -} - -func removeItemFromSlice(slice []string, item string) []string { - var hasItem bool - var itemIndex int - for i, str := range slice { - if str == item { - hasItem = true - itemIndex = i - } - } - if hasItem { - copy(slice[itemIndex:], slice[itemIndex+1:]) - return slice[:len(slice)-1] - } - return slice -} - -func expandEsExtension(raw []interface{}, es *models.ElasticsearchConfiguration) { - for _, rawExt := range raw { - m := rawExt.(map[string]interface{}) - - var version string - if v, ok := m["version"]; ok { - version = v.(string) - } - - var url string - if u, ok := m["url"]; ok { - url = u.(string) - } - - var name string - if n, ok := m["name"]; ok { - name = n.(string) - } - - if t, ok := m["type"]; ok && t.(string) == "bundle" { - es.UserBundles = append(es.UserBundles, &models.ElasticsearchUserBundle{ - 
Name: &name, - ElasticsearchVersion: &version, - URL: &url, - }) - } - - if t, ok := m["type"]; ok && t.(string) == "plugin" { - es.UserPlugins = append(es.UserPlugins, &models.ElasticsearchUserPlugin{ - Name: &name, - ElasticsearchVersion: &version, - URL: &url, - }) - } - } -} - -func expandAccountTrust(raw []interface{}, es *models.ElasticsearchClusterSettings) { - var accounts []*models.AccountTrustRelationship - for _, rawTrust := range raw { - m := rawTrust.(map[string]interface{}) - - var id string - if v, ok := m["account_id"]; ok { - id = v.(string) - } - - var all bool - if a, ok := m["trust_all"]; ok { - all = a.(bool) - } - - var allowlist []string - if al, ok := m["trust_allowlist"]; ok { - set := al.(*schema.Set) - if set.Len() > 0 { - allowlist = util.ItemsToString(set.List()) - } - } - - accounts = append(accounts, &models.AccountTrustRelationship{ - AccountID: &id, - TrustAll: &all, - TrustAllowlist: allowlist, - }) - } - - if len(accounts) == 0 { - return - } - - if es.Trust == nil { - es.Trust = &models.ElasticsearchClusterTrustSettings{} - } - - es.Trust.Accounts = append(es.Trust.Accounts, accounts...) 
-} - -func expandExternalTrust(raw []interface{}, es *models.ElasticsearchClusterSettings) { - var external []*models.ExternalTrustRelationship - for _, rawTrust := range raw { - m := rawTrust.(map[string]interface{}) - - var id string - if v, ok := m["relationship_id"]; ok { - id = v.(string) - } - - var all bool - if a, ok := m["trust_all"]; ok { - all = a.(bool) - } - - var allowlist []string - if al, ok := m["trust_allowlist"]; ok { - set := al.(*schema.Set) - if set.Len() > 0 { - allowlist = util.ItemsToString(set.List()) - } - } - - external = append(external, &models.ExternalTrustRelationship{ - TrustRelationshipID: &id, - TrustAll: &all, - TrustAllowlist: allowlist, - }) - } - - if len(external) == 0 { - return - } - - if es.Trust == nil { - es.Trust = &models.ElasticsearchClusterTrustSettings{} - } - - es.Trust.External = append(es.Trust.External, external...) -} diff --git a/ec/ecresource/deploymentresource/elasticsearch_flatteners.go b/ec/ecresource/deploymentresource/elasticsearch_flatteners.go deleted file mode 100644 index 538caf172..000000000 --- a/ec/ecresource/deploymentresource/elasticsearch_flatteners.go +++ /dev/null @@ -1,360 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "bytes" - "encoding/json" - "fmt" - "sort" - "strconv" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/models" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// flattenEsResources takes in Elasticsearch resource models and returns its -// flattened form. -func flattenEsResources(in []*models.ElasticsearchResourceInfo, name string, remotes models.RemoteResources) ([]interface{}, error) { - result := make([]interface{}, 0, len(in)) - for _, res := range in { - m := make(map[string]interface{}) - if util.IsCurrentEsPlanEmpty(res) || isEsResourceStopped(res) { - continue - } - - if res.Info.ClusterID != nil && *res.Info.ClusterID != "" { - m["resource_id"] = *res.Info.ClusterID - } - - if res.RefID != nil && *res.RefID != "" { - m["ref_id"] = *res.RefID - } - - if res.Region != nil { - m["region"] = *res.Region - } - - plan := res.Info.PlanInfo.Current.Plan - topology, err := flattenEsTopology(plan) - if err != nil { - return nil, err - } - if len(topology) > 0 { - m["topology"] = topology - } - - if plan.AutoscalingEnabled != nil { - m["autoscale"] = strconv.FormatBool(*plan.AutoscalingEnabled) - } - - if meta := res.Info.Metadata; meta != nil && meta.CloudID != "" { - m["cloud_id"] = meta.CloudID - } - - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v - } - - m["config"] = flattenEsConfig(plan.Elasticsearch) - - if remotes := flattenEsRemotes(remotes); remotes.Len() > 0 { - m["remote_cluster"] = remotes - } - - extensions := schema.NewSet(esExtensionHash, nil) - for _, ext := range flattenEsBundles(plan.Elasticsearch.UserBundles) { - extensions.Add(ext) - } - - for _, ext := range flattenEsPlugins(plan.Elasticsearch.UserPlugins) { - extensions.Add(ext) - } - - if extensions.Len() > 0 { - m["extension"] = extensions - } - - if settings := res.Info.Settings; settings != nil { - if trust := 
flattenAccountTrust(settings.Trust); trust != nil { - m["trust_account"] = trust - } - - if trust := flattenExternalTrust(settings.Trust); trust != nil { - m["trust_external"] = trust - } - } - - result = append(result, m) - } - - return result, nil -} - -func isPotentiallySizedTopology(topology *models.ElasticsearchClusterTopologyElement, isAutoscaling bool) bool { - currentlySized := topology.Size != nil && topology.Size.Value != nil && *topology.Size.Value > 0 - canBeSized := isAutoscaling && topology.AutoscalingMax != nil && topology.AutoscalingMax.Value != nil && *topology.AutoscalingMax.Value > 0 - - return currentlySized || canBeSized -} - -func flattenEsTopology(plan *models.ElasticsearchClusterPlan) ([]interface{}, error) { - result := make([]interface{}, 0, len(plan.ClusterTopology)) - for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) - if !isPotentiallySizedTopology(topology, plan.AutoscalingEnabled != nil && *plan.AutoscalingEnabled) { - continue - } - - // ID is always set. - m["id"] = topology.ID - - if topology.InstanceConfigurationID != "" { - m["instance_configuration_id"] = topology.InstanceConfigurationID - } - - // TODO: Check legacy plans. 
- // if topology.MemoryPerNode > 0 { - // m["size"] = strconv.Itoa(int(topology.MemoryPerNode)) - // } - - if topology.Size != nil { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource - } - - m["zone_count"] = topology.ZoneCount - - if nt := topology.NodeType; nt != nil { - if nt.Data != nil { - m["node_type_data"] = strconv.FormatBool(*nt.Data) - } - - if nt.Ingest != nil { - m["node_type_ingest"] = strconv.FormatBool(*nt.Ingest) - } - - if nt.Master != nil { - m["node_type_master"] = strconv.FormatBool(*nt.Master) - } - - if nt.Ml != nil { - m["node_type_ml"] = strconv.FormatBool(*nt.Ml) - } - } - - if len(topology.NodeRoles) > 0 { - m["node_roles"] = schema.NewSet(schema.HashString, util.StringToItems( - topology.NodeRoles..., - )) - } - - autoscaling := make(map[string]interface{}) - if ascale := topology.AutoscalingMax; ascale != nil { - autoscaling["max_size_resource"] = *ascale.Resource - autoscaling["max_size"] = util.MemoryToState(*ascale.Value) - } - - if ascale := topology.AutoscalingMin; ascale != nil { - autoscaling["min_size_resource"] = *ascale.Resource - autoscaling["min_size"] = util.MemoryToState(*ascale.Value) - } - - if topology.AutoscalingPolicyOverrideJSON != nil { - b, err := json.Marshal(topology.AutoscalingPolicyOverrideJSON) - if err != nil { - return nil, fmt.Errorf( - "elasticsearch topology %s: unable to persist policy_override_json: %w", - topology.ID, err, - ) - } - autoscaling["policy_override_json"] = string(b) - } - - if len(autoscaling) > 0 { - m["autoscaling"] = []interface{}{autoscaling} - } - - // Computed config object to avoid unsetting legacy topology config settings. - m["config"] = flattenEsConfig(topology.Elasticsearch) - - result = append(result, m) - } - - // Ensure the topologies are sorted alphabetically by ID. 
- sort.SliceStable(result, func(i, j int) bool { - a := result[i].(map[string]interface{}) - b := result[j].(map[string]interface{}) - return a["id"].(string) < b["id"].(string) - }) - return result, nil -} - -func flattenEsConfig(cfg *models.ElasticsearchConfiguration) []interface{} { - var m = make(map[string]interface{}) - if cfg == nil { - return nil - } - - if len(cfg.EnabledBuiltInPlugins) > 0 { - m["plugins"] = schema.NewSet(schema.HashString, - util.StringToItems(cfg.EnabledBuiltInPlugins...), - ) - } - - if cfg.UserSettingsYaml != "" { - m["user_settings_yaml"] = cfg.UserSettingsYaml - } - - if cfg.UserSettingsOverrideYaml != "" { - m["user_settings_override_yaml"] = cfg.UserSettingsOverrideYaml - } - - if o := cfg.UserSettingsJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_json"] = string(b) - } - } - - if o := cfg.UserSettingsOverrideJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_override_json"] = string(b) - } - } - - if cfg.DockerImage != "" { - m["docker_image"] = cfg.DockerImage - } - - // If no settings are set, there's no need to store the empty values in the - // state and makes the state consistent with a clean import return. 
- if len(m) == 0 { - return nil - } - - return []interface{}{m} -} - -func flattenEsRemotes(in models.RemoteResources) *schema.Set { - res := newElasticsearchRemoteSet() - for _, r := range in.Resources { - var m = make(map[string]interface{}) - if r.DeploymentID != nil && *r.DeploymentID != "" { - m["deployment_id"] = *r.DeploymentID - } - - if r.ElasticsearchRefID != nil && *r.ElasticsearchRefID != "" { - m["ref_id"] = *r.ElasticsearchRefID - } - - if r.Alias != nil && *r.Alias != "" { - m["alias"] = *r.Alias - } - - if r.SkipUnavailable != nil { - m["skip_unavailable"] = *r.SkipUnavailable - } - res.Add(m) - } - - return res -} - -func newElasticsearchRemoteSet(remotes ...interface{}) *schema.Set { - return schema.NewSet( - schema.HashResource(elasticsearchRemoteCluster().Elem.(*schema.Resource)), - remotes, - ) -} - -func flattenEsBundles(in []*models.ElasticsearchUserBundle) []interface{} { - result := make([]interface{}, 0, len(in)) - for _, bundle := range in { - m := make(map[string]interface{}) - m["type"] = "bundle" - m["version"] = *bundle.ElasticsearchVersion - m["url"] = *bundle.URL - m["name"] = *bundle.Name - - result = append(result, m) - } - - return result -} - -func flattenEsPlugins(in []*models.ElasticsearchUserPlugin) []interface{} { - result := make([]interface{}, 0, len(in)) - for _, plugin := range in { - m := make(map[string]interface{}) - m["type"] = "plugin" - m["version"] = *plugin.ElasticsearchVersion - m["url"] = *plugin.URL - m["name"] = *plugin.Name - - result = append(result, m) - } - - return result -} - -func flattenAccountTrust(in *models.ElasticsearchClusterTrustSettings) *schema.Set { - if in == nil { - return nil - } - - account := schema.NewSet(schema.HashResource(accountResource()), nil) - for _, acc := range in.Accounts { - account.Add(map[string]interface{}{ - "account_id": *acc.AccountID, - "trust_all": *acc.TrustAll, - "trust_allowlist": schema.NewSet(schema.HashString, - util.StringToItems(acc.TrustAllowlist...), - ), - 
}) - } - - if account.Len() > 0 { - return account - } - return nil -} - -func flattenExternalTrust(in *models.ElasticsearchClusterTrustSettings) *schema.Set { - if in == nil { - return nil - } - - external := schema.NewSet(schema.HashResource(externalResource()), nil) - for _, ext := range in.External { - external.Add(map[string]interface{}{ - "relationship_id": *ext.TrustRelationshipID, - "trust_all": *ext.TrustAll, - "trust_allowlist": schema.NewSet(schema.HashString, - util.StringToItems(ext.TrustAllowlist...), - ), - }) - } - - if external.Len() > 0 { - return external - } - return nil -} diff --git a/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders.go b/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders.go deleted file mode 100644 index 7d01f7782..000000000 --- a/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders.go +++ /dev/null @@ -1,80 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/esremoteclustersapi" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" -) - -func handleRemoteClusters(d *schema.ResourceData, client *api.API) error { - if keyIsEmptyUnchanged(d, "elasticsearch.0.remote_cluster") { - return nil - } - - remoteResources := expandRemoteClusters( - d.Get("elasticsearch.0.remote_cluster").(*schema.Set), - ) - - return esremoteclustersapi.Update(esremoteclustersapi.UpdateParams{ - API: client, - DeploymentID: d.Id(), - RefID: d.Get("elasticsearch.0.ref_id").(string), - RemoteResources: remoteResources, - }) -} - -func expandRemoteClusters(set *schema.Set) *models.RemoteResources { - res := models.RemoteResources{Resources: []*models.RemoteResourceRef{}} - - for _, r := range set.List() { - var resourceRef models.RemoteResourceRef - m := r.(map[string]interface{}) - - if id, ok := m["deployment_id"]; ok { - resourceRef.DeploymentID = ec.String(id.(string)) - } - - if v, ok := m["ref_id"]; ok { - resourceRef.ElasticsearchRefID = ec.String(v.(string)) - } - - if v, ok := m["alias"]; ok { - resourceRef.Alias = ec.String(v.(string)) - } - - if v, ok := m["skip_unavailable"]; ok { - resourceRef.SkipUnavailable = ec.Bool(v.(bool)) - } - - res.Resources = append(res.Resources, &resourceRef) - } - - return &res -} - -func keyIsEmptyUnchanged(d *schema.ResourceData, k string) bool { - old, new := d.GetChange(k) - oldSlice := old.(*schema.Set) - newSlice := new.(*schema.Set) - return oldSlice.Len() == 0 && newSlice.Len() == 0 -} diff --git a/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders_test.go b/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders_test.go deleted file mode 100644 index c069aa03b..000000000 --- 
a/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders_test.go +++ /dev/null @@ -1,155 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_handleRemoteClusters(t *testing.T) { - deploymentEmptyRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleDeploymentEmptyRD(), - Schema: newSchema(), - }) - deploymentWithRemotesRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.7.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "remote_cluster": []interface{}{ - map[string]interface{}{ - "alias": "alias", - "deployment_id": "someid", - "ref_id": 
"main-elasticsearch", - "skip_unavailable": true, - }, - map[string]interface{}{ - "deployment_id": "some other id", - "ref_id": "main-elasticsearch", - }, - }, - }}, - }, - Schema: newSchema(), - }) - type args struct { - d *schema.ResourceData - client *api.API - } - tests := []struct { - name string - args args - err error - }{ - { - name: "returns when the resource has no remote clusters", - args: args{ - d: deploymentEmptyRD, - client: api.NewMock(), - }, - }, - { - name: "flattens the remote clusters", - args: args{ - d: deploymentWithRemotesRD, - client: api.NewMock(mock.New202ResponseAssertion( - &mock.RequestAssertion{ - Header: api.DefaultWriteMockHeaders, - Host: api.DefaultMockHost, - Path: `/api/v1/deployments/320b7b540dfc967a7a649c18e2fce4ed/elasticsearch/main-elasticsearch/remote-clusters`, - Method: "PUT", - Body: mock.NewStringBody(`{"resources":[{"alias":"alias","deployment_id":"someid","elasticsearch_ref_id":"main-elasticsearch","skip_unavailable":true},{"alias":"","deployment_id":"some other id","elasticsearch_ref_id":"main-elasticsearch","skip_unavailable":false}]}` + "\n"), - }, - mock.NewStringBody("{}"), - )), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := handleRemoteClusters(tt.args.d, tt.args.client) - if !assert.Equal(t, tt.err, err) { - t.Error(err) - } - }) - } -} - -func Test_expandRemoteClusters(t *testing.T) { - type args struct { - set *schema.Set - } - tests := []struct { - name string - args args - want *models.RemoteResources - }{ - { - name: "wants no error or empty res", - args: args{set: newElasticsearchRemoteSet()}, - want: &models.RemoteResources{Resources: []*models.RemoteResourceRef{}}, - }, - { - name: "expands remotes", - args: args{set: newElasticsearchRemoteSet([]interface{}{ - map[string]interface{}{ - "alias": "alias", - "deployment_id": "someid", - "ref_id": "main-elasticsearch", - "skip_unavailable": true, - }, - map[string]interface{}{ - "deployment_id": "some other id", 
- "ref_id": "main-elasticsearch", - }, - }...)}, - want: &models.RemoteResources{Resources: []*models.RemoteResourceRef{ - { - DeploymentID: ec.String("some other id"), - ElasticsearchRefID: ec.String("main-elasticsearch"), - }, - { - Alias: ec.String("alias"), - DeploymentID: ec.String("someid"), - ElasticsearchRefID: ec.String("main-elasticsearch"), - SkipUnavailable: ec.Bool(true), - }, - }}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := expandRemoteClusters(tt.args.set) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/ecresource/deploymentresource/enterprise_search_expanders.go b/ec/ecresource/deploymentresource/enterprise_search_expanders.go deleted file mode 100644 index 77077befb..000000000 --- a/ec/ecresource/deploymentresource/enterprise_search_expanders.go +++ /dev/null @@ -1,211 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// expandEssResources expands Enterprise Search resources into their models. 
-func expandEssResources(ess []interface{}, tpl *models.EnterpriseSearchPayload) ([]*models.EnterpriseSearchPayload, error) { - if len(ess) == 0 { - return nil, nil - } - - if tpl == nil { - return nil, errors.New("enterprise_search specified but deployment template is not configured for it. Use a different template if you wish to add enterprise_search") - } - - result := make([]*models.EnterpriseSearchPayload, 0, len(ess)) - for _, raw := range ess { - resResource, err := expandEssResource(raw, tpl) - if err != nil { - return nil, err - } - result = append(result, resResource) - } - - return result, nil -} - -func expandEssResource(raw interface{}, res *models.EnterpriseSearchPayload) (*models.EnterpriseSearchPayload, error) { - ess := raw.(map[string]interface{}) - - if esRefID, ok := ess["elasticsearch_cluster_ref_id"]; ok { - res.ElasticsearchClusterRefID = ec.String(esRefID.(string)) - } - - if refID, ok := ess["ref_id"]; ok { - res.RefID = ec.String(refID.(string)) - } - - if version, ok := ess["version"]; ok { - res.Plan.EnterpriseSearch.Version = version.(string) - } - - if region, ok := ess["region"]; ok { - if r := region.(string); r != "" { - res.Region = ec.String(r) - } - } - - if cfg, ok := ess["config"]; ok { - if err := expandEssConfig(cfg, res.Plan.EnterpriseSearch); err != nil { - return nil, err - } - } - - if rt, ok := ess["topology"]; ok && len(rt.([]interface{})) > 0 { - topology, err := expandEssTopology(rt, res.Plan.ClusterTopology) - if err != nil { - return nil, err - } - res.Plan.ClusterTopology = topology - } else { - res.Plan.ClusterTopology = defaultEssTopology(res.Plan.ClusterTopology) - } - - return res, nil -} - -func expandEssTopology(raw interface{}, topologies []*models.EnterpriseSearchTopologyElement) ([]*models.EnterpriseSearchTopologyElement, error) { - rawTopologies := raw.([]interface{}) - res := make([]*models.EnterpriseSearchTopologyElement, 0, len(rawTopologies)) - for i, rawTop := range rawTopologies { - topology := 
rawTop.(map[string]interface{}) - var icID string - if id, ok := topology["instance_configuration_id"]; ok { - icID = id.(string) - } - - // When a topology element is set but no instance_configuration_id - // is set, then obtain the instance_configuration_id from the topology - // element. - if t := defaultEssTopology(topologies); icID == "" && len(t) >= i { - icID = t[i].InstanceConfigurationID - } - size, err := util.ParseTopologySize(topology) - if err != nil { - return nil, err - } - - // Since Enterprise Search is not enabled by default in the template, - // if the size == nil, it means that the size hasn't been specified in - // the definition. - if size == nil { - size = &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(minimumEnterpriseSearchSize), - } - } - - elem, err := matchEssTopology(icID, topologies) - if err != nil { - return nil, err - } - if size != nil { - elem.Size = size - } - - if zones, ok := topology["zone_count"]; ok { - if z := zones.(int); z > 0 { - elem.ZoneCount = int32(z) - } - } - - res = append(res, elem) - } - - return res, nil -} - -func expandEssConfig(raw interface{}, res *models.EnterpriseSearchConfiguration) error { - for _, rawCfg := range raw.([]interface{}) { - cfg := rawCfg.(map[string]interface{}) - if settings, ok := cfg["user_settings_json"]; ok && settings != nil { - if s, ok := settings.(string); ok && s != "" { - if err := json.Unmarshal([]byte(s), &res.UserSettingsJSON); err != nil { - return fmt.Errorf("failed expanding enterprise_search user_settings_json: %w", err) - } - } - } - if settings, ok := cfg["user_settings_override_json"]; ok && settings != nil { - if s, ok := settings.(string); ok && s != "" { - if err := json.Unmarshal([]byte(s), &res.UserSettingsOverrideJSON); err != nil { - return fmt.Errorf("failed expanding enterprise_search user_settings_override_json: %w", err) - } - } - } - if settings, ok := cfg["user_settings_yaml"]; ok { - res.UserSettingsYaml = settings.(string) - } - 
if settings, ok := cfg["user_settings_override_yaml"]; ok { - res.UserSettingsOverrideYaml = settings.(string) - } - - if v, ok := cfg["docker_image"]; ok { - res.DockerImage = v.(string) - } - } - - return nil -} - -// defaultApmTopology iterates over all the templated topology elements and -// sets the size to the default when the template size is smaller than the -// deployment template default, the same is done on the ZoneCount. -func defaultEssTopology(topology []*models.EnterpriseSearchTopologyElement) []*models.EnterpriseSearchTopologyElement { - for _, t := range topology { - if *t.Size.Value < minimumEnterpriseSearchSize || *t.Size.Value == 0 { - t.Size.Value = ec.Int32(minimumEnterpriseSearchSize) - } - if t.ZoneCount < minimumZoneCount { - t.ZoneCount = minimumZoneCount - } - } - - return topology -} - -func matchEssTopology(id string, topologies []*models.EnterpriseSearchTopologyElement) (*models.EnterpriseSearchTopologyElement, error) { - for _, t := range topologies { - if t.InstanceConfigurationID == id { - return t, nil - } - } - return nil, fmt.Errorf( - `enterprise_search topology: invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, - id, - ) -} - -// essResource returns the EnterpriseSearchPayload from a deployment -// template or an empty version of the payload. -func essResource(res *models.DeploymentTemplateInfoV2) *models.EnterpriseSearchPayload { - if len(res.DeploymentTemplate.Resources.EnterpriseSearch) == 0 { - return nil - } - return res.DeploymentTemplate.Resources.EnterpriseSearch[0] -} diff --git a/ec/ecresource/deploymentresource/enterprise_search_expanders_test.go b/ec/ecresource/deploymentresource/enterprise_search_expanders_test.go deleted file mode 100644 index b70375a4f..000000000 --- a/ec/ecresource/deploymentresource/enterprise_search_expanders_test.go +++ /dev/null @@ -1,358 +0,0 @@ -// Licensed to Elasticsearch B.V. 
under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" -) - -func Test_expandEssResources(t *testing.T) { - tpl := func() *models.EnterpriseSearchPayload { - return essResource(parseDeploymentTemplate(t, - "testdata/template-aws-io-optimized-v2.json", - )) - } - type args struct { - ess []interface{} - tpl *models.EnterpriseSearchPayload - } - tests := []struct { - name string - args args - want []*models.EnterpriseSearchPayload - err error - }{ - { - name: "returns nil when there's no resources", - }, - { - name: "parses an enterprise_search resource with explicit topology", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.m5d", - "size": "2g", - "zone_count": 1, - }}, - }}, - }, - want: 
[]*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }}, - }, - }}, - }, - { - name: "parses an enterprise_search resource with no topology takes the minimum size", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - }}, - }, - want: []*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }}, - }, - }}, - }, - { - name: "parses an enterprise_search resource with topology but no instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": 
"some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "size": "4g", - }}, - }}, - }, - want: []*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }}, - }, - }}, - }, - { - name: "parses an enterprise_search resource with topology but instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.m5d", - }}, - }}, - }, - want: []*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }}, - }, - }}, - }, - { - name: 
"parses an enterprise_search resource with topology and zone_count", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "zone_count": 1, - }}, - }}, - }, - want: []*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }}, - }, - }}, - }, - { - name: "parses an enterprise_search resource with explicit topology and config", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "secondary-enterprise_search", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: override", - "user_settings_json": `{"some.setting":"value"}`, - "user_settings_override_json": `{"some.setting":"override"}`, - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.m5d", - "size": "4g", - "zone_count": 1, - "node_type_appserver": true, - "node_type_connector": true, - "node_type_worker": true, - }}, - }}, - }, - want: []*models.EnterpriseSearchPayload{{ - 
ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("secondary-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.7.0", - UserSettingsYaml: "some.setting: value", - UserSettingsOverrideYaml: "some.setting: override", - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "override", - }, - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }}, - }, - }}, - }, - { - name: "parses an enterprise_search resource with invalid instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.m5", - "size": "2g", - "zone_count": 1, - }}, - }}, - }, - err: errors.New(`enterprise_search topology: invalid instance_configuration_id: "aws.enterprisesearch.m5" doesn't match any of the deployment template instance configurations`), - }, - { - name: "tries to parse an enterprise_search resource when the template doesn't have an Enterprise Search instance set.", - args: args{ - tpl: nil, - ess: []interface{}{map[string]interface{}{ - "ref_id": "tertiary-enterprise_search", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "version": "7.8.0", - "region": "some-region", - "config": 
[]interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\": \"value\"}", - "user_settings_override_json": "{\"some.setting\": \"value2\"}", - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprise_search.r5d", - "size": "4g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - err: errors.New("enterprise_search specified but deployment template is not configured for it. Use a different template if you wish to add enterprise_search"), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := expandEssResources(tt.args.ess, tt.args.tpl) - if !assert.Equal(t, tt.err, err) { - t.Error(err) - } - - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/ecresource/deploymentresource/enterprise_search_flatteners.go b/ec/ecresource/deploymentresource/enterprise_search_flatteners.go deleted file mode 100644 index cc2560724..000000000 --- a/ec/ecresource/deploymentresource/enterprise_search_flatteners.go +++ /dev/null @@ -1,149 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "bytes" - "encoding/json" - - "github.com/elastic/cloud-sdk-go/pkg/models" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// flattenEssResources flattens Enterprise Search resources into its flattened structure. -func flattenEssResources(in []*models.EnterpriseSearchResourceInfo, name string) []interface{} { - result := make([]interface{}, 0, len(in)) - for _, res := range in { - m := make(map[string]interface{}) - if util.IsCurrentEssPlanEmpty(res) || isEssResourceStopped(res) { - continue - } - - if res.RefID != nil && *res.RefID != "" { - m["ref_id"] = *res.RefID - } - - if res.Info.ID != nil && *res.Info.ID != "" { - m["resource_id"] = *res.Info.ID - } - - if res.Region != nil { - m["region"] = *res.Region - } - - plan := res.Info.PlanInfo.Current.Plan - if topology := flattenEssTopology(plan); len(topology) > 0 { - m["topology"] = topology - } - - if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID - } - - if urls := util.FlattenClusterEndpoint(res.Info.Metadata); len(urls) > 0 { - for k, v := range urls { - m[k] = v - } - } - - if c := flattenEssConfig(plan.EnterpriseSearch); len(c) > 0 { - m["config"] = c - } - - result = append(result, m) - } - - return result -} - -func flattenEssTopology(plan *models.EnterpriseSearchPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) - for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) - if topology.Size == nil || topology.Size.Value == nil || *topology.Size.Value == 0 { - continue - } - - if topology.InstanceConfigurationID != "" { - m["instance_configuration_id"] = topology.InstanceConfigurationID - } - - if topology.Size != nil { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource - } - - if nt := topology.NodeType; nt != nil { - if nt.Appserver != nil { - 
m["node_type_appserver"] = *nt.Appserver - } - - if nt.Connector != nil { - m["node_type_connector"] = *nt.Connector - } - - if nt.Worker != nil { - m["node_type_worker"] = *nt.Worker - } - } - - m["zone_count"] = topology.ZoneCount - - result = append(result, m) - } - - return result -} - -func flattenEssConfig(cfg *models.EnterpriseSearchConfiguration) []interface{} { - var m = make(map[string]interface{}) - if cfg == nil { - return nil - } - - if cfg.UserSettingsYaml != "" { - m["user_settings_yaml"] = cfg.UserSettingsYaml - } - - if cfg.UserSettingsOverrideYaml != "" { - m["user_settings_override_yaml"] = cfg.UserSettingsOverrideYaml - } - - if o := cfg.UserSettingsJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_json"] = string(b) - } - } - - if o := cfg.UserSettingsOverrideJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_override_json"] = string(b) - } - } - - if cfg.DockerImage != "" { - m["docker_image"] = cfg.DockerImage - } - - if len(m) == 0 { - return nil - } - - return []interface{}{m} -} diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search.go b/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search.go new file mode 100644 index 000000000..1e54d83d9 --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type EnterpriseSearchTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + Topology types.List `tfsdk:"topology"` + Config types.List `tfsdk:"config"` +} + +type EnterpriseSearch struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + Topology EnterpriseSearchTopologies `tfsdk:"topology"` + Config EnterpriseSearchConfigs `tfsdk:"config"` +} + +type EnterpriseSearches []EnterpriseSearch diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search_config.go b/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search_config.go new file mode 100644 index 000000000..a618e4270 --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search_config.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type EnterpriseSearchConfigTF struct { + DockerImage types.String `tfsdk:"docker_image"` + UserSettingsJson types.String `tfsdk:"user_settings_json"` + UserSettingsOverrideJson types.String `tfsdk:"user_settings_override_json"` + UserSettingsYaml types.String `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml types.String `tfsdk:"user_settings_override_yaml"` +} + +type EnterpriseSearchConfig struct { + DockerImage *string `tfsdk:"docker_image"` + UserSettingsJson *string `tfsdk:"user_settings_json"` + UserSettingsOverrideJson *string `tfsdk:"user_settings_override_json"` + UserSettingsYaml *string `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml *string `tfsdk:"user_settings_override_yaml"` +} + +type EnterpriseSearchConfigs []EnterpriseSearchConfig diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search_topology.go b/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search_topology.go new file mode 100644 index 000000000..f6b39b2cc --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search_topology.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type EnterpriseSearchTopologyTF struct { + InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + NodeTypeAppserver types.Bool `tfsdk:"node_type_appserver"` + NodeTypeConnector types.Bool `tfsdk:"node_type_connector"` + NodeTypeWorker types.Bool `tfsdk:"node_type_worker"` +} + +type EnterpriseSearchTopology struct { + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + NodeTypeAppserver *bool `tfsdk:"node_type_appserver"` + NodeTypeConnector *bool `tfsdk:"node_type_connector"` + NodeTypeWorker *bool `tfsdk:"node_type_worker"` +} + +type EnterpriseSearchTopologies []EnterpriseSearchTopology diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go b/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go new file mode 100644 index 000000000..4afd0d7e4 --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go @@ -0,0 +1,181 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func EnterpriseSearchSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Enterprise Search resource definition", + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + resource.UseStateForUnknown(), + }, + }, + "ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-enterprise_search"}), + resource.UseStateForUnknown(), + }, + }, + "resource_id": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + 
resource.UseStateForUnknown(), + }, + }, + "region": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "http_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "https_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "topology": { + Description: "Optional topology attribute", + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + resource.UseStateForUnknown(), + }, + }, + "zone_count": { + Type: types.Int64Type, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "node_type_appserver": { + Type: types.BoolType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "node_type_connector": { + Type: types.BoolType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "node_type_worker": { + Type: types.BoolType, + Computed: true, + PlanModifiers: 
[]tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + }), + }, + "config": { + Description: `Optionally define the Enterprise Search configuration options for the Enterprise Search Server`, + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + // TODO + // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the Enterprise Search nodes will use. Note that this field will only work for internal users only.", + Optional: true, + }, + "user_settings_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + }), + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search.go new file mode 100644 index 000000000..ad846c3c6 --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search.go @@ -0,0 +1,217 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/models" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v1" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type EnterpriseSearchTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + NodeTypeAppserver types.Bool `tfsdk:"node_type_appserver"` + NodeTypeConnector types.Bool `tfsdk:"node_type_connector"` + NodeTypeWorker types.Bool `tfsdk:"node_type_worker"` + Config types.Object `tfsdk:"config"` +} + +type EnterpriseSearch struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + NodeTypeAppserver *bool `tfsdk:"node_type_appserver"` + NodeTypeConnector *bool `tfsdk:"node_type_connector"` + NodeTypeWorker *bool 
`tfsdk:"node_type_worker"` + Config *EnterpriseSearchConfig `tfsdk:"config"` +} + +type EnterpriseSearches []EnterpriseSearch + +func ReadEnterpriseSearch(in *models.EnterpriseSearchResourceInfo) (*EnterpriseSearch, error) { + if util.IsCurrentEssPlanEmpty(in) || utils.IsEssResourceStopped(in) { + return nil, nil + } + + var ess EnterpriseSearch + + ess.RefId = in.RefID + + ess.ResourceId = in.Info.ID + + ess.Region = in.Region + + plan := in.Info.PlanInfo.Current.Plan + + topologies, err := ReadEnterpriseSearchTopologies(plan.ClusterTopology) + + if err != nil { + return nil, err + } + + if len(topologies) > 0 { + ess.InstanceConfigurationId = topologies[0].InstanceConfigurationId + ess.Size = topologies[0].Size + ess.SizeResource = topologies[0].SizeResource + ess.ZoneCount = topologies[0].ZoneCount + ess.NodeTypeAppserver = topologies[0].NodeTypeAppserver + ess.NodeTypeConnector = topologies[0].NodeTypeConnector + ess.NodeTypeWorker = topologies[0].NodeTypeWorker + } + + ess.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID + + ess.HttpEndpoint, ess.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) + + cfg, err := ReadEnterpriseSearchConfig(plan.EnterpriseSearch) + if err != nil { + return nil, err + } + ess.Config = cfg + + return &ess, nil +} + +func (es *EnterpriseSearchTF) Payload(ctx context.Context, payload models.EnterpriseSearchPayload) (*models.EnterpriseSearchPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + if !es.ElasticsearchClusterRefId.IsNull() { + payload.ElasticsearchClusterRefID = &es.ElasticsearchClusterRefId.Value + } + + if !es.RefId.IsNull() { + payload.RefID = &es.RefId.Value + } + + if es.Region.Value != "" { + payload.Region = &es.Region.Value + } + + if !es.Config.IsNull() && !es.Config.IsUnknown() { + var config *v1.EnterpriseSearchConfigTF + + ds := tfsdk.ValueAs(ctx, es.Config, &config) + + diags.Append(ds...) 
+ + if !ds.HasError() && config != nil { + diags.Append(EnterpriseSearchConfigPayload(ctx, *config, payload.Plan.EnterpriseSearch)...) + } + } + + topologyTF := v1.EnterpriseSearchTopologyTF{ + InstanceConfigurationId: es.InstanceConfigurationId, + Size: es.Size, + SizeResource: es.SizeResource, + ZoneCount: es.ZoneCount, + NodeTypeAppserver: es.NodeTypeAppserver, + NodeTypeConnector: es.NodeTypeConnector, + NodeTypeWorker: es.NodeTypeWorker, + } + + topology, ds := enterpriseSearchTopologyPayload(ctx, topologyTF, defaultEssTopology(payload.Plan.ClusterTopology), 0) + + diags = append(diags, ds...) + + if topology != nil { + payload.Plan.ClusterTopology = []*models.EnterpriseSearchTopologyElement{topology} + } + + return &payload, diags +} + +func ReadEnterpriseSearches(in []*models.EnterpriseSearchResourceInfo) (*EnterpriseSearch, error) { + for _, model := range in { + if util.IsCurrentEssPlanEmpty(model) || utils.IsEssResourceStopped(model) { + continue + } + + es, err := ReadEnterpriseSearch(model) + if err != nil { + return nil, err + } + + return es, nil + } + + return nil, nil +} + +func EnterpriseSearchesPayload(ctx context.Context, esObj types.Object, template *models.DeploymentTemplateInfoV2) (*models.EnterpriseSearchPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + var es *EnterpriseSearchTF + + if diags = tfsdk.ValueAs(ctx, esObj, &es); diags.HasError() { + return nil, diags + } + + if es == nil { + return nil, nil + } + + templatePayload := EssResource(template) + + if templatePayload == nil { + diags.AddError( + "enterprise_search payload error", + "enterprise_search specified but deployment template is not configured for it. 
Use a different template if you wish to add enterprise_search", + ) + return nil, diags + } + + payload, diags := es.Payload(ctx, *templatePayload) + + if diags.HasError() { + return nil, diags + } + + return payload, nil +} + +// EssResource returns the EnterpriseSearchPayload from a deployment +// template or an empty version of the payload. +func EssResource(template *models.DeploymentTemplateInfoV2) *models.EnterpriseSearchPayload { + if template == nil || len(template.DeploymentTemplate.Resources.EnterpriseSearch) == 0 { + return nil + } + return template.DeploymentTemplate.Resources.EnterpriseSearch[0] +} diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_config.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_config.go new file mode 100644 index 000000000..2d3f64218 --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_config.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v1" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +type EnterpriseSearchConfig v1.EnterpriseSearchConfig + +func ReadEnterpriseSearchConfig(in *models.EnterpriseSearchConfiguration) (*EnterpriseSearchConfig, error) { + var cfg EnterpriseSearchConfig + + if in == nil { + return nil, nil + } + + if in.UserSettingsYaml != "" { + cfg.UserSettingsYaml = &in.UserSettingsYaml + } + + if in.UserSettingsOverrideYaml != "" { + cfg.UserSettingsOverrideYaml = &in.UserSettingsOverrideYaml + } + + if o := in.UserSettingsJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsJson = ec.String(string(b)) + } + } + + if o := in.UserSettingsOverrideJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsOverrideJson = ec.String(string(b)) + } + } + + if in.DockerImage != "" { + cfg.DockerImage = &in.DockerImage + } + + if cfg == (EnterpriseSearchConfig{}) { + return nil, nil + } + + return &cfg, nil +} + +func EnterpriseSearchConfigPayload(ctx context.Context, cfg v1.EnterpriseSearchConfigTF, res *models.EnterpriseSearchConfiguration) diag.Diagnostics { + var diags diag.Diagnostics + + if cfg.UserSettingsJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsJson.Value), &res.UserSettingsJSON); err != nil { + diags.AddError("failed expanding enterprise_search user_settings_json", err.Error()) + } + } + if cfg.UserSettingsOverrideJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsOverrideJson.Value), &res.UserSettingsOverrideJSON); err != nil { + diags.AddError("failed expanding enterprise_search user_settings_override_json", err.Error()) + } + } + if 
!cfg.UserSettingsYaml.IsNull() { + res.UserSettingsYaml = cfg.UserSettingsYaml.Value + } + if !cfg.UserSettingsOverrideYaml.IsNull() { + res.UserSettingsOverrideYaml = cfg.UserSettingsOverrideYaml.Value + } + + if !cfg.DockerImage.IsNull() { + res.DockerImage = cfg.DockerImage.Value + } + + return diags +} diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload_test.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload_test.go new file mode 100644 index 000000000..602c908f8 --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload_test.go @@ -0,0 +1,338 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" +) + +func Test_enterpriseSearchPayload(t *testing.T) { + type args struct { + es *EnterpriseSearch + template *models.DeploymentTemplateInfoV2 + } + tests := []struct { + name string + args args + want *models.EnterpriseSearchPayload + diags diag.Diagnostics + }{ + { + name: "returns nil when there's no resources", + }, + { + name: "parses an enterprise_search resource with explicit topology", + args: args{ + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + es: &EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("2g"), + ZoneCount: 1, + }, + }, + want: &models.EnterpriseSearchPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + { 
+ name: "parses an enterprise_search resource with no topology takes the minimum size", + args: args{ + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + es: &EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + }, + }, + want: &models.EnterpriseSearchPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }}, + }, + }, + }, + { + name: "parses an enterprise_search resource with topology but no instance_configuration_id", + args: args{ + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + es: &EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Size: ec.String("4g"), + }, + }, + want: &models.EnterpriseSearchPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: 
ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }}, + }, + }, + }, + { + name: "parses an enterprise_search resource with topology but instance_configuration_id", + args: args{ + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + es: &EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + }, + }, + want: &models.EnterpriseSearchPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }}, + }, + }, + }, + { + name: "parses an enterprise_search resource with topology and zone_count", + args: args{ + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + es: &EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + ZoneCount: 1, + }, + }, + want: &models.EnterpriseSearchPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + 
EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }}, + }, + }, + }, + { + name: "parses an enterprise_search resource with explicit topology and config", + args: args{ + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + es: &EnterpriseSearch{ + RefId: ec.String("secondary-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Config: &EnterpriseSearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: override"), + UserSettingsJson: ec.String(`{"some.setting":"value"}`), + UserSettingsOverrideJson: ec.String(`{"some.setting":"override"}`), + }, + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("4g"), + ZoneCount: 1, + NodeTypeAppserver: ec.Bool(true), + NodeTypeConnector: ec.Bool(true), + NodeTypeWorker: ec.Bool(true), + }, + }, + want: &models.EnterpriseSearchPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("secondary-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{ + UserSettingsYaml: "some.setting: value", + UserSettingsOverrideYaml: "some.setting: override", + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "override", + }, + }, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ + ZoneCount: 1, + 
InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }}, + }, + }, + }, + { + name: "parses an enterprise_search resource with invalid instance_configuration_id", + args: args{ + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + es: &EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5"), + Size: ec.String("2g"), + ZoneCount: 1, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("cannot match enterprise search topology", `invalid instance_configuration_id: "aws.enterprisesearch.m5" doesn't match any of the deployment template instance configurations`) + return diags + }(), + }, + { + name: "tries to parse an enterprise_search resource when the template doesn't have an Enterprise Search instance set.", + args: args{ + template: nil, + es: &EnterpriseSearch{ + RefId: ec.String("tertiary-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Config: &EnterpriseSearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String(`{"some.setting": "value"}`), + UserSettingsOverrideJson: ec.String(`{"some.setting": "value2"}`), + }, + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + 
diags.AddError("enterprise_search payload error", `enterprise_search specified but deployment template is not configured for it. Use a different template if you wish to add enterprise_search`) + return diags + }(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ess types.Object + diags := tfsdk.ValueFrom(context.Background(), tt.args.es, EnterpriseSearchSchema().FrameworkType(), &ess) + assert.Nil(t, diags) + + got, diags := EnterpriseSearchesPayload(context.Background(), ess, tt.args.template) + if tt.diags != nil { + assert.Equal(t, tt.diags, diags) + } + + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/ec/ecresource/deploymentresource/enterprise_search_flatteners_test.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read_test.go similarity index 79% rename from ec/ecresource/deploymentresource/enterprise_search_flatteners_test.go rename to ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read_test.go index 69d725de7..fb10b3c84 100644 --- a/ec/ecresource/deploymentresource/enterprise_search_flatteners_test.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read_test.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package v2 import ( "testing" @@ -27,20 +27,19 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) -func Test_flattenEssResource(t *testing.T) { +func Test_readEnterpriseSearch(t *testing.T) { type args struct { - in []*models.EnterpriseSearchResourceInfo - name string + in []*models.EnterpriseSearchResourceInfo } tests := []struct { name string args args - want []interface{} + want *EnterpriseSearch }{ { name: "empty resource list returns empty list", args: args{in: []*models.EnterpriseSearchResourceInfo{}}, - want: []interface{}{}, + want: nil, }, { name: "empty current plan returns empty list", @@ -53,7 +52,7 @@ func Test_flattenEssResource(t *testing.T) { }, }, }}, - want: []interface{}{}, + want: nil, }, { name: "parses the enterprisesearch resource", @@ -155,35 +154,32 @@ func Test_flattenEssResource(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://enterprisesearchresource.cloud.elastic.co:9200", - "https_endpoint": "https://enterprisesearchresource.cloud.elastic.co:9243", - "config": []interface{}{map[string]interface{}{ - "user_settings_json": "{\"some.setting\":\"some other value\"}", - "user_settings_override_json": "{\"some.setting\":\"some other override\"}", - "user_settings_override_yaml": "some.setting: some override", - "user_settings_yaml": "some.setting: some value", - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - "node_type_appserver": true, - "node_type_worker": false, - }}, + want: &EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: 
ec.String("some-region"), + HttpEndpoint: ec.String("http://enterprisesearchresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://enterprisesearchresource.cloud.elastic.co:9243"), + Config: &EnterpriseSearchConfig{ + UserSettingsJson: ec.String("{\"some.setting\":\"some other value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"some other override\"}"), + UserSettingsOverrideYaml: ec.String("some.setting: some override"), + UserSettingsYaml: ec.String("some.setting: some value"), }, + InstanceConfigurationId: ec.String("aws.enterprisesearch.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + NodeTypeAppserver: ec.Bool(true), + NodeTypeWorker: ec.Bool(false), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenEssResources(tt.args.in, tt.args.name) + got, err := ReadEnterpriseSearches(tt.args.in) + assert.Nil(t, err) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go new file mode 100644 index 000000000..44ffd3839 --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go @@ -0,0 +1,161 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + "fmt" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v1" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +const ( + minimumApmSize = 512 + minimumEnterpriseSearchSize = 2048 +) + +type EnterpriseSearchTopologies v1.EnterpriseSearchTopologies + +func ReadEnterpriseSearchTopology(in *models.EnterpriseSearchTopologyElement) (*v1.EnterpriseSearchTopology, error) { + var topology v1.EnterpriseSearchTopology + + topology.InstanceConfigurationId = ec.String(in.InstanceConfigurationID) + + if in.Size != nil { + topology.Size = ec.String(util.MemoryToState(*in.Size.Value)) + topology.SizeResource = in.Size.Resource + } + + if nt := in.NodeType; nt != nil { + if nt.Appserver != nil { + topology.NodeTypeAppserver = nt.Appserver + } + + if nt.Connector != nil { + topology.NodeTypeConnector = nt.Connector + } + + if nt.Worker != nil { + topology.NodeTypeWorker = nt.Worker + } + } + + topology.ZoneCount = int(in.ZoneCount) + + return &topology, nil +} + +func ReadEnterpriseSearchTopologies(in []*models.EnterpriseSearchTopologyElement) (EnterpriseSearchTopologies, error) { + if len(in) == 0 { + return nil, nil + } + + topologies := 
make(EnterpriseSearchTopologies, 0, len(in)) + for _, model := range in { + if model.Size == nil || model.Size.Value == nil || *model.Size.Value == 0 { + continue + } + + topology, err := ReadEnterpriseSearchTopology(model) + if err != nil { + return nil, err + } + + topologies = append(topologies, *topology) + } + + return topologies, nil +} + +func enterpriseSearchTopologyPayload(ctx context.Context, topology v1.EnterpriseSearchTopologyTF, planModels []*models.EnterpriseSearchTopologyElement, index int) (*models.EnterpriseSearchTopologyElement, diag.Diagnostics) { + var diags diag.Diagnostics + + icID := topology.InstanceConfigurationId.Value + + // When a topology element is set but no instance_configuration_id + // is set, then obtain the instance_configuration_id from the topology + // element. + if icID == "" && index < len(planModels) { + icID = planModels[index].InstanceConfigurationID + } + + elem, err := matchEssTopology(icID, planModels) + if err != nil { + diags.AddError("cannot match enterprise search topology", err.Error()) + return nil, diags + } + + size, err := converters.ParseTopologySizeTF(topology.Size, topology.SizeResource) + + if err != nil { + diags.AddError("failed parse enterprise search topology size", err.Error()) + return nil, diags + } + + // Since Enterprise Search is not enabled by default in the template, + // if the size == nil, it means that the size hasn't been specified in + // the definition. + if size == nil { + size = &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(minimumEnterpriseSearchSize), + } + } + + elem.Size = size + + if topology.ZoneCount.Value > 0 { + elem.ZoneCount = int32(topology.ZoneCount.Value) + } + + return elem, nil +} + +// defaultApmTopology iterates over all the templated topology elements and +// sets the size to the default when the template size is smaller than the +// deployment template default, the same is done on the ZoneCount. 
+func defaultEssTopology(topology []*models.EnterpriseSearchTopologyElement) []*models.EnterpriseSearchTopologyElement { + for _, t := range topology { + if *t.Size.Value < minimumEnterpriseSearchSize || *t.Size.Value == 0 { + t.Size.Value = ec.Int32(minimumEnterpriseSearchSize) + } + if t.ZoneCount < utils.MinimumZoneCount { + t.ZoneCount = utils.MinimumZoneCount + } + } + + return topology +} + +func matchEssTopology(id string, topologies []*models.EnterpriseSearchTopologyElement) (*models.EnterpriseSearchTopologyElement, error) { + for _, t := range topologies { + if t.InstanceConfigurationID == id { + return t, nil + } + } + return nil, fmt.Errorf( + `invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, + id, + ) +} diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go new file mode 100644 index 000000000..fad6817cb --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go @@ -0,0 +1,168 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func EnterpriseSearchSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Enterprise Search resource definition", + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + resource.UseStateForUnknown(), + }, + }, + "ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-enterprise_search"}), + resource.UseStateForUnknown(), + }, + }, + "resource_id": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "region": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "http_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "https_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "instance_configuration_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size_resource": { 
+ Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + resource.UseStateForUnknown(), + }, + }, + "zone_count": { + Type: types.Int64Type, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "node_type_appserver": { + Type: types.BoolType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "node_type_connector": { + Type: types.BoolType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "node_type_worker": { + Type: types.BoolType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "config": { + Description: `Optionally define the Enterprise Search configuration options for the Enterprise Search Server`, + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + // TODO + // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the Enterprise Search nodes will use. Note that this field will only work for internal users only.", + Optional: true, + }, + "user_settings_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + }), + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/expanders.go b/ec/ecresource/deploymentresource/expanders.go deleted file mode 100644 index 108fb7e9d..000000000 --- a/ec/ecresource/deploymentresource/expanders.go +++ /dev/null @@ -1,374 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "fmt" - "sort" - - "github.com/blang/semver/v4" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deptemplateapi" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -var ( - dataTiersVersion = semver.MustParse("7.10.0") -) - -func createResourceToModel(d *schema.ResourceData, client *api.API) (*models.DeploymentCreateRequest, error) { - var result = models.DeploymentCreateRequest{ - Name: d.Get("name").(string), - Alias: d.Get("alias").(string), - Resources: &models.DeploymentCreateResources{}, - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{}, - } - - dtID := d.Get("deployment_template_id").(string) - version := d.Get("version").(string) - template, err := deptemplateapi.Get(deptemplateapi.GetParams{ - API: client, - TemplateID: dtID, - Region: d.Get("region").(string), - HideInstanceConfigurations: true, - }) - if err != nil { - return nil, err - } - - useNodeRoles, err := compatibleWithNodeRoles(version) - if err != nil { - return nil, err - } - - merr := multierror.NewPrefixed("invalid configuration") - esRes, err := 
expandEsResources( - d.Get("elasticsearch").([]interface{}), - enrichElasticsearchTemplate( - esResource(template), dtID, version, useNodeRoles, - ), - ) - if err != nil { - merr = merr.Append(err) - } - result.Resources.Elasticsearch = append(result.Resources.Elasticsearch, esRes...) - - kibanaRes, err := expandKibanaResources( - d.Get("kibana").([]interface{}), kibanaResource(template), - ) - if err != nil { - merr = merr.Append(err) - } - result.Resources.Kibana = append(result.Resources.Kibana, kibanaRes...) - - apmRes, err := expandApmResources( - d.Get("apm").([]interface{}), apmResource(template), - ) - if err != nil { - merr = merr.Append(err) - } - result.Resources.Apm = append(result.Resources.Apm, apmRes...) - - integrationsServerRes, err := expandIntegrationsServerResources( - d.Get("integrations_server").([]interface{}), integrationsServerResource(template), - ) - if err != nil { - merr = merr.Append(err) - } - result.Resources.IntegrationsServer = append(result.Resources.IntegrationsServer, integrationsServerRes...) - - enterpriseSearchRes, err := expandEssResources( - d.Get("enterprise_search").([]interface{}), essResource(template), - ) - if err != nil { - merr = merr.Append(err) - } - result.Resources.EnterpriseSearch = append(result.Resources.EnterpriseSearch, enterpriseSearchRes...) 
- - if err := merr.ErrorOrNil(); err != nil { - return nil, err - } - - expandTrafficFilterCreate(d.Get("traffic_filter").(*schema.Set), &result) - - observability, err := expandObservability(d.Get("observability").([]interface{}), client) - if err != nil { - return nil, err - } - result.Settings.Observability = observability - - result.Metadata.Tags = expandTags(d.Get("tags").(map[string]interface{})) - - return &result, nil -} - -func updateResourceToModel(d *schema.ResourceData, client *api.API) (*models.DeploymentUpdateRequest, error) { - var result = models.DeploymentUpdateRequest{ - Name: d.Get("name").(string), - Alias: d.Get("alias").(string), - PruneOrphans: ec.Bool(true), - Resources: &models.DeploymentUpdateResources{}, - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{}, - } - - dtID := d.Get("deployment_template_id").(string) - version := d.Get("version").(string) - template, err := deptemplateapi.Get(deptemplateapi.GetParams{ - API: client, - TemplateID: dtID, - Region: d.Get("region").(string), - HideInstanceConfigurations: true, - }) - if err != nil { - return nil, err - } - - es := d.Get("elasticsearch").([]interface{}) - kibana := d.Get("kibana").([]interface{}) - apm := d.Get("apm").([]interface{}) - integrationsServer := d.Get("integrations_server").([]interface{}) - enterpriseSearch := d.Get("enterprise_search").([]interface{}) - - // When the deployment template is changed, we need to unset the missing - // resource topologies to account for a new instance_configuration_id and - // a different default value. - prevDT, _ := d.GetChange("deployment_template_id") - if d.HasChange("deployment_template_id") && prevDT.(string) != "" { - // If the deployment_template_id is changed, then we unset the - // Elasticsearch topology to account for the case where the - // instance_configuration_id changes, i.e. Hot / Warm, etc. 
- - // This might not be necessary going forward as we move to - // tiered Elasticsearch nodes. - unsetTopology(es) - } - - useNodeRoles, err := compatibleWithNodeRoles(version) - if err != nil { - return nil, err - } - convertLegacy, err := legacyToNodeRoles(d) - if err != nil { - return nil, err - } - useNodeRoles = useNodeRoles && convertLegacy - - merr := multierror.NewPrefixed("invalid configuration") - esRes, err := expandEsResources( - es, enrichElasticsearchTemplate( - esResource(template), dtID, version, useNodeRoles, - ), - ) - if err != nil { - merr = merr.Append(err) - } - result.Resources.Elasticsearch = append(result.Resources.Elasticsearch, esRes...) - - // if the restore snapshot operation has been specified, the snapshot restore - // can't be full once the cluster has been created, so the Strategy must be set - // to "partial". - ensurePartialSnapshotStrategy(esRes) - - kibanaRes, err := expandKibanaResources(kibana, kibanaResource(template)) - if err != nil { - merr = merr.Append(err) - } - result.Resources.Kibana = append(result.Resources.Kibana, kibanaRes...) - - apmRes, err := expandApmResources(apm, apmResource(template)) - if err != nil { - merr = merr.Append(err) - } - result.Resources.Apm = append(result.Resources.Apm, apmRes...) - - integrationsServerRes, err := expandIntegrationsServerResources(integrationsServer, integrationsServerResource(template)) - if err != nil { - merr = merr.Append(err) - } - result.Resources.IntegrationsServer = append(result.Resources.IntegrationsServer, integrationsServerRes...) - - enterpriseSearchRes, err := expandEssResources(enterpriseSearch, essResource(template)) - if err != nil { - merr = merr.Append(err) - } - result.Resources.EnterpriseSearch = append(result.Resources.EnterpriseSearch, enterpriseSearchRes...) 
- - if err := merr.ErrorOrNil(); err != nil { - return nil, err - } - - observability, err := expandObservability(d.Get("observability").([]interface{}), client) - if err != nil { - return nil, err - } - result.Settings.Observability = observability - - // In order to stop shipping logs and metrics, an empty Observability - // object must be passed, as opposed to a nil object when creating a - // deployment without observability settings. - if util.ObjectRemoved(d, "observability") { - result.Settings.Observability = &models.DeploymentObservabilitySettings{} - } - - result.Metadata.Tags = expandTags(d.Get("tags").(map[string]interface{})) - - return &result, nil -} - -func enrichElasticsearchTemplate(tpl *models.ElasticsearchPayload, dt, version string, useNodeRoles bool) *models.ElasticsearchPayload { - if tpl.Plan.DeploymentTemplate == nil { - tpl.Plan.DeploymentTemplate = &models.DeploymentTemplateReference{} - } - - if tpl.Plan.DeploymentTemplate.ID == nil || *tpl.Plan.DeploymentTemplate.ID == "" { - tpl.Plan.DeploymentTemplate.ID = ec.String(dt) - } - - if tpl.Plan.Elasticsearch.Version == "" { - tpl.Plan.Elasticsearch.Version = version - } - - for _, topology := range tpl.Plan.ClusterTopology { - if useNodeRoles { - topology.NodeType = nil - continue - } - topology.NodeRoles = nil - } - - return tpl -} - -func unsetTopology(rawRes []interface{}) { - for _, r := range rawRes { - delete(r.(map[string]interface{}), "topology") - } -} - -func expandTags(raw map[string]interface{}) []*models.MetadataItem { - result := make([]*models.MetadataItem, 0, len(raw)) - for k, v := range raw { - result = append(result, &models.MetadataItem{ - Key: ec.String(k), - Value: ec.String(v.(string)), - }) - } - - // Sort by key - sort.SliceStable(result, func(i, j int) bool { - return *result[i].Key < *result[j].Key - }) - - return result -} - -func compatibleWithNodeRoles(version string) (bool, error) { - deploymentVersion, err := semver.Parse(version) - if err != nil { - return 
false, fmt.Errorf("failed to parse Elasticsearch version: %w", err) - } - - return deploymentVersion.GE(dataTiersVersion), nil -} - -func ensurePartialSnapshotStrategy(ess []*models.ElasticsearchPayload) { - for _, es := range ess { - transient := es.Plan.Transient - if transient == nil || transient.RestoreSnapshot == nil { - continue - } - transient.RestoreSnapshot.Strategy = "partial" - } -} - -// legacyToNodeRoles returns true when the legacy "node_type_*" should be -// migrated over to node_roles. Which will be true when: -// * The version field doesn't change. -// * The version field changes but: -// - The Elasticsearch.0.toplogy doesn't have any node_type_* set. -func legacyToNodeRoles(d *schema.ResourceData) (bool, error) { - if !d.HasChange("version") { - return true, nil - } - - oldVRaw, newVRaw := d.GetChange("version") - oldVS, newVS := oldVRaw.(string), newVRaw.(string) - - // If the previous version is empty, node_roles should be used. - if oldVS == "" { - return true, nil - } - - oldV, err := semver.Parse(oldVS) - if err != nil { - return false, fmt.Errorf("failed to parse previous Elasticsearch version: %w", err) - } - newV, err := semver.Parse(newVS) - if err != nil { - return false, fmt.Errorf("failed to parse previous Elasticsearch version: %w", err) - } - - // if the version change moves from non-node_roles to one - // that supports node roles, do not migrate on that step. - if oldV.LT(dataTiersVersion) && newV.GE(dataTiersVersion) { - return false, nil - } - - // When any topology elements in the state have the node_type_* - // properties set, the node_role field cannot be used, since - // we'd be changing the version AND migrating over `node_role`s - // which is not permitted by the API. 
- var hasNodeTypeSet bool - for _, t := range d.Get("elasticsearch.0.topology").([]interface{}) { - top := t.(map[string]interface{}) - if nt, ok := top["node_type_data"]; ok { - if nt.(string) != "" { - hasNodeTypeSet = true - } - } - if nt, ok := top["node_type_ingest"]; ok { - if nt.(string) != "" { - hasNodeTypeSet = true - } - } - if nt, ok := top["node_type_master"]; ok { - if nt.(string) != "" { - hasNodeTypeSet = true - } - } - if nt, ok := top["node_type_ml"]; ok { - if nt.(string) != "" { - hasNodeTypeSet = true - } - } - } - - if hasNodeTypeSet { - return false, nil - } - - return true, nil -} diff --git a/ec/ecresource/deploymentresource/expanders_test.go b/ec/ecresource/deploymentresource/expanders_test.go deleted file mode 100644 index 51d07c2f6..000000000 --- a/ec/ecresource/deploymentresource/expanders_test.go +++ /dev/null @@ -1,5119 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "bytes" - "errors" - "io" - "os" - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func fileAsResponseBody(t *testing.T, name string) io.ReadCloser { - t.Helper() - f, err := os.Open(name) - if err != nil { - t.Fatal(err) - } - defer f.Close() - - var buf = new(bytes.Buffer) - if _, err := io.Copy(buf, f); err != nil { - t.Fatal(err) - } - buf.WriteString("\n") - - return io.NopCloser(buf) -} - -func Test_createResourceToModel(t *testing.T) { - deploymentRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - deploymentNodeRolesRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleDeployment(), - Schema: newSchema(), - }) - ioOptimizedTpl := func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/template-aws-io-optimized-v2.json") - } - deploymentOverrideRd := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleDeploymentOverrides(), - Schema: newSchema(), - }) - deploymentOverrideICRd := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleDeploymentOverridesIC(), - Schema: newSchema(), - }) - hotWarmTpl := func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/template-aws-hot-warm-v2.json") - } - deploymentHotWarm := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-hot-warm-v2", - "region": 
"us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - }, - }) - - ccsTpl := func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/template-aws-cross-cluster-search-v2.json") - } - deploymentCCS := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-cross-cluster-search-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - }, - }) - - emptyTpl := func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/template-empty.json") - } - deploymentEmptyTemplate := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "empty-deployment-template", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - "apm": []interface{}{map[string]interface{}{}}, - "enterprise_search": []interface{}{map[string]interface{}{}}, - }, - }) - - deploymentWithTags := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{ - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }}, - }, - }, - "tags": map[string]interface{}{ - "aaa": "bbb", - "owner": "elastic", - "cost-center": "rnd", - }, - }, - Schema: newSchema(), - }) - - type args struct { - d *schema.ResourceData - client *api.API - } - tests := []struct { - name string - 
args args - want *models.DeploymentCreateRequest - err error - }{ - { - name: "parses the resources", - args: args{ - d: deploymentNodeRolesRD, - client: api.NewMock( - mock.New200Response(hotWarmTpl()), - mock.New200Response( - mock.NewStructBody(models.DeploymentGetResponse{ - Healthy: ec.Bool(true), - ID: ec.String(mock.ValidClusterID), - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{{ - ID: ec.String(mock.ValidClusterID), - RefID: ec.String("main-elasticsearch"), - }}, - }, - }), - ), - ), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - Settings: &models.DeploymentCreateSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - Observability: &models.DeploymentObservabilitySettings{ - Logging: &models.DeploymentLoggingSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - Metrics: &models.DeploymentMetricsSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - }, - }, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.11.1", - UserSettingsYaml: `some.setting: value`, - UserSettingsOverrideYaml: `some.setting: value2`, - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - 
UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "value2", - }, - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-hot-warm-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - NodeRoles: []string{ - "data_content", - "data_hot", - "ingest", - "master", - "remote_cluster_client", - "transform", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highstorage.d2", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "warm"}, - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: 
ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{ - SystemSettings: &models.ApmSystemSettings{ - DebugEnabled: ec.Bool(false), - }, - }, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the legacy resources", - args: args{ - d: deploymentRD, - client: api.NewMock( - mock.New200Response(ioOptimizedTpl()), - mock.New200Response( - mock.NewStructBody(models.DeploymentGetResponse{ - Healthy: ec.Bool(true), - ID: ec.String(mock.ValidClusterID), - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{{ - ID: ec.String(mock.ValidClusterID), - RefID: ec.String("main-elasticsearch"), - }}, - }, - }), - ), - ), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - Settings: &models.DeploymentCreateSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - 
Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - Observability: &models.DeploymentObservabilitySettings{ - Logging: &models.DeploymentLoggingSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - Metrics: &models.DeploymentMetricsSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - }, - }, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - UserSettingsYaml: `some.setting: value`, - UserSettingsOverrideYaml: `some.setting: value2`, - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "value2", - }, - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - Ml: ec.Bool(false), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: 
&models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{ - SystemSettings: &models.ApmSystemSettings{ - DebugEnabled: ec.Bool(false), - }, - }, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with empty declarations (IO 
Optimized)", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.7.0", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - "apm": []interface{}{map[string]interface{}{}}, - "enterprise_search": []interface{}{map[string]interface{}{}}, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - }, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - 
Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources 
with empty declarations (IO Optimized) with node_roles", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.11.0", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - "apm": []interface{}{map[string]interface{}{}}, - "enterprise_search": []interface{}{map[string]interface{}{}}, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - }, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.11.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ 
- "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - 
}, - }, - }, - { - name: "parses the resources with topology overrides (size)", - args: args{ - d: deploymentOverrideRd, - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - Settings: &models.DeploymentCreateSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - }, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: 
ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with topology overrides (IC)", - args: args{ - d: deploymentOverrideICRd, - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - Settings: &models.DeploymentCreateSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - }, - Metadata: 
&models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: 
ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with empty declarations (Hot Warm)", - args: args{ - d: deploymentHotWarm, - client: api.NewMock(mock.New200Response(hotWarmTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, hotWarmTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - Curation: nil, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Curation: nil, - Version: "7.9.2", - }, - DeploymentTemplate: 
&models.DeploymentTemplateReference{ - ID: ec.String("aws-hot-warm-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d2", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(false), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, 
- }, - }, - }, - }, - }, - { - name: "parses the resources with empty declarations (Hot Warm) with node_roles", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-hot-warm-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - }, - }), - client: api.NewMock(mock.New200Response(hotWarmTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - Curation: nil, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Curation: nil, - Version: "7.12.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-hot-warm-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), 
- Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d2", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with empty declarations (Hot Warm) with node_roles and extensions", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-hot-warm-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "extension": []interface{}{ - map[string]interface{}{ - "name": "my-plugin", - "type": "plugin", - "url": "repo://12311234", - "version": "7.7.0", - }, - map[string]interface{}{ - "name": "my-second-plugin", - "type": "plugin", - "url": "repo://12311235", - "version": 
"7.7.0", - }, - map[string]interface{}{ - "name": "my-bundle", - "type": "bundle", - "url": "repo://1231122", - "version": "7.7.0", - }, - map[string]interface{}{ - "name": "my-second-bundle", - "type": "bundle", - "url": "repo://1231123", - "version": "7.7.0", - }, - }, - }}, - }, - }), - client: api.NewMock(mock.New200Response(hotWarmTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.0", - UserBundles: []*models.ElasticsearchUserBundle{ - { - URL: ec.String("repo://1231122"), - Name: ec.String("my-bundle"), - ElasticsearchVersion: ec.String("7.7.0"), - }, - { - URL: ec.String("repo://1231123"), - Name: ec.String("my-second-bundle"), - ElasticsearchVersion: ec.String("7.7.0"), - }, - }, - UserPlugins: []*models.ElasticsearchUserPlugin{ - { - URL: ec.String("repo://12311235"), - Name: ec.String("my-second-plugin"), - ElasticsearchVersion: ec.String("7.7.0"), - }, - { - URL: ec.String("repo://12311234"), - Name: ec.String("my-plugin"), - ElasticsearchVersion: ec.String("7.7.0"), - }, - }, - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-hot-warm-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, 
- NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d2", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "deployment with autoscaling enabled", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "true", - "topology": []interface{}{ - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, - map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }, - map[string]interface{}{ - "id": "warm", - "size": "4g", - }, - }, - }}, - }, - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: 
&models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(true), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "cold", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "data_cold", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "cold", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(59392), - Resource: ec.String("memory"), - }, - }, - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - 
AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "deployment with autoscaling enabled and custom policies set", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "true", - "topology": []interface{}{ - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, - map[string]interface{}{ - "id": "hot_content", - "size": "8g", - "autoscaling": []interface{}{map[string]interface{}{ - "max_size": "232g", - }}, - }, - map[string]interface{}{ - "id": "warm", - "size": "4g", - "autoscaling": []interface{}{map[string]interface{}{ - "max_size": "116g", - }}, - }, - }, - }}, - }, - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, 
ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(true), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "cold", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "data_cold", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "cold", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(59392), - Resource: ec.String("memory"), - }, - }, - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(237568), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: 
ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "deployment with dedicated master and cold tiers", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{ - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, - map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }, - map[string]interface{}{ - "id": "master", - "size": "1g", - "zone_count": 3, - }, - map[string]interface{}{ - "id": "warm", - "size": "4g", - }, - }, - }}, - }, - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - 
Version: "7.12.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "cold", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "data_cold", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "cold", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(59392), - Resource: ec.String("memory"), - }, - }, - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "master", - ZoneCount: 3, - InstanceConfigurationID: "aws.master.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - NodeRoles: []string{ - "master", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{}, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - }, - { - ID: "warm", - ZoneCount: 2, - 
InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "deployment with dedicated coordinating and cold tiers", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{ - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, - map[string]interface{}{ - "id": "coordinating", - "size": "2g", - "zone_count": 2, - }, - map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }, - map[string]interface{}{ - "id": "warm", - "size": "4g", - }, - }, - }}, - }, - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: 
&models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "cold", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "data_cold", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "cold", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(59392), - Resource: ec.String("memory"), - }, - }, - { - ID: "coordinating", - ZoneCount: 2, - InstanceConfigurationID: "aws.coordinating.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "ingest", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{}, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - }, - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: 
&models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "deployment with dedicated coordinating, master and cold tiers", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{ - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, - map[string]interface{}{ - "id": "coordinating", - "size": "2g", - "zone_count": 2, - }, - map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }, - map[string]interface{}{ - "id": "master", - "size": "1g", - "zone_count": 3, - }, - map[string]interface{}{ - "id": "warm", - "size": "4g", - }, - }, - }}, - }, - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), 
&models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "cold", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "data_cold", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "cold", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(59392), - Resource: ec.String("memory"), - }, - }, - { - ID: "coordinating", - ZoneCount: 2, - InstanceConfigurationID: "aws.coordinating.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "ingest", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{}, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - }, - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": 
"hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "master", - ZoneCount: 3, - InstanceConfigurationID: "aws.master.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - NodeRoles: []string{ - "master", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{}, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - // - { - name: "deployment with docker_image overrides", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.14.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/elasticsearch/container:7.14.1-hash", - }}, - "autoscale": "false", - "trust_account": []interface{}{ - 
map[string]interface{}{ - "account_id": "ANID", - "trust_all": "true", - }, - }, - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }, - }, - }}, - "kibana": []interface{}{map[string]interface{}{ - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/kibana/container:7.14.1-hash", - }}, - }}, - "apm": []interface{}{map[string]interface{}{ - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/apm/container:7.14.1-hash", - }}, - }}, - "enterprise_search": []interface{}{map[string]interface{}{ - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/enterprise_search/container:7.14.1-hash", - }}, - }}, - }, - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - Trust: &models.ElasticsearchClusterTrustSettings{ - Accounts: []*models.AccountTrustRelationship{ - { - AccountID: ec.String("ANID"), - TrustAll: ec.Bool(true), - }, - }, - }, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.14.1", - DockerImage: "docker.elastic.com/elasticsearch/container:7.14.1-hash", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - ZoneCount: 2, - 
InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - Apm: []*models.ApmPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{ - DockerImage: "docker.elastic.com/apm/container:7.14.1-hash", - SystemSettings: &models.ApmSystemSettings{ - DebugEnabled: ec.Bool(false), - }, - }, - ClusterTopology: []*models.ApmTopologyElement{{ - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - ZoneCount: 1, - }}, - }, - RefID: ec.String("main-apm"), - Region: ec.String("us-east-1"), - }}, - Kibana: []*models.KibanaPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{ - DockerImage: "docker.elastic.com/kibana/container:7.14.1-hash", - }, - ClusterTopology: []*models.KibanaClusterTopologyElement{{ - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - ZoneCount: 1, - }}, - }, - RefID: ec.String("main-kibana"), - Region: ec.String("us-east-1"), - }}, - EnterpriseSearch: []*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - 
DockerImage: "docker.elastic.com/enterprise_search/container:7.14.1-hash", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - ZoneCount: 2, - }}, - }, - RefID: ec.String("main-enterprise_search"), - Region: ec.String("us-east-1"), - }}, - }, - }, - }, - { - name: "deployment with trust settings set", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "trust_account": []interface{}{ - map[string]interface{}{ - "account_id": "ANID", - "trust_all": "true", - }, - map[string]interface{}{ - "account_id": "anotherID", - "trust_all": "false", - "trust_allowlist": []interface{}{ - "abc", "hij", "dfg", - }, - }, - }, - "trust_external": []interface{}{ - map[string]interface{}{ - "relationship_id": "external_id", - "trust_all": "true", - }, - map[string]interface{}{ - "relationship_id": "another_external_id", - "trust_all": "false", - "trust_allowlist": []interface{}{ - "abc", "dfg", - }, - }, - }, - "topology": []interface{}{ - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, - map[string]interface{}{ - "id": "hot_content", - "size": "8g", - "autoscaling": []interface{}{map[string]interface{}{ - "max_size": "232g", - }}, - }, - map[string]interface{}{ - "id": "warm", - "size": "4g", - "autoscaling": []interface{}{map[string]interface{}{ - "max_size": "116g", - }}, - }, - }, - }}, - }, - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: 
&models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - Trust: &models.ElasticsearchClusterTrustSettings{ - Accounts: []*models.AccountTrustRelationship{ - { - AccountID: ec.String("ANID"), - TrustAll: ec.Bool(true), - }, - { - AccountID: ec.String("anotherID"), - TrustAll: ec.Bool(false), - TrustAllowlist: []string{ - "abc", "dfg", "hij", - }, - }, - }, - External: []*models.ExternalTrustRelationship{ - { - TrustRelationshipID: ec.String("external_id"), - TrustAll: ec.Bool(true), - }, - { - TrustRelationshipID: ec.String("another_external_id"), - TrustAll: ec.Bool(false), - TrustAllowlist: []string{ - "abc", "dfg", - }, - }, - }, - }, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "cold", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "data_cold", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "cold", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: 
&models.TopologySize{ - Value: ec.Int32(59392), - Resource: ec.String("memory"), - }, - }, - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(237568), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "parses the resources with empty declarations (Cross Cluster Search)", - args: args{ - d: deploymentCCS, - client: api.NewMock(mock.New200Response(ccsTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ccsTpl(), false), &models.ElasticsearchPayload{ - Region: 
ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{}, - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.9.2", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-cross-cluster-search-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - ZoneCount: 1, - InstanceConfigurationID: "aws.ccs.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with tags", - args: args{ - d: deploymentWithTags, - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{Tags: []*models.MetadataItem{ - {Key: ec.String("aaa"), Value: ec.String("bbb")}, - {Key: ec.String("cost-center"), Value: ec.String("rnd")}, - {Key: ec.String("owner"), Value: ec.String("elastic")}, - }}, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: 
enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - { - name: "handles a snapshot_source block, leaving the strategy as is", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "version": "7.10.1", - "snapshot_source": []interface{}{map[string]interface{}{ - "source_elasticsearch_cluster_id": "8c63b87af9e24ea49b8a4bfe550e5fe9", - }}, - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }}, - }}, - }, - Schema: newSchema(), - }), - client: 
api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - Transient: &models.TransientElasticsearchPlanConfiguration{ - RestoreSnapshot: &models.RestoreSnapshotConfiguration{ - SourceClusterID: "8c63b87af9e24ea49b8a4bfe550e5fe9", - SnapshotName: ec.String("__latest_success__"), - }, - }, - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - // This case we're using an empty deployment_template to ensure that - // resources not present in the template cannot be expanded, 
receiving - // an error instead. - { - name: "parses the resources with empty explicit declarations (Empty deployment template)", - args: args{ - d: deploymentEmptyTemplate, - client: api.NewMock(mock.New200Response(emptyTpl())), - }, - err: multierror.NewPrefixed("invalid configuration", - errors.New("kibana specified but deployment template is not configured for it. Use a different template if you wish to add kibana"), - errors.New("apm specified but deployment template is not configured for it. Use a different template if you wish to add apm"), - errors.New("enterprise_search specified but deployment template is not configured for it. Use a different template if you wish to add enterprise_search"), - ), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := createResourceToModel(tt.args.d, tt.args.client) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_updateResourceToModel(t *testing.T) { - deploymentRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - var ioOptimizedTpl = func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/template-aws-io-optimized-v2.json") - } - deploymentEmptyRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleDeploymentEmptyRD(), - Schema: newSchema(), - }) - deploymentOverrideRd := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleDeploymentOverrides(), - Schema: newSchema(), - }) - - hotWarmTpl := func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/template-aws-hot-warm-v2.json") - } - deploymentHotWarm := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": 
"aws-hot-warm-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - }, - }) - - ccsTpl := func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/template-aws-cross-cluster-search-v2.json") - } - deploymentEmptyRDWithTemplateChange := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-cross-cluster-search-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - }, - Schema: newSchema(), - }) - - deploymentEmptyRDWithTemplateChangeWithDiffSize := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{ - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - }}, - }, - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "coordinating", - "size": "16g", - }}, - }, - }, - "kibana": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "size": "2g", - }}, - }}, - "apm": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "size": "1g", - }}, - }}, - "enterprise_search": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "size": "2g", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-cross-cluster-search-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": 
[]interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - }, - Schema: newSchema(), - }) - - emptyTpl := func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/template-empty.json") - } - deploymentChangeFromExplicitSizingToEmpty := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{ - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - }}, - }, - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "coordinating", - "size": "16g", - }}, - }, - }, - "kibana": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "size": "2g", - }}, - }}, - "apm": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "size": "1g", - }}, - }}, - "enterprise_search": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "size": "8g", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - "apm": []interface{}{map[string]interface{}{}}, - "enterprise_search": []interface{}{map[string]interface{}{}}, - }, - Schema: newSchema(), - }) - - deploymentChangeToEmptyDT := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{ - map[string]interface{}{ - "topology": 
[]interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - }}, - }, - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "coordinating", - "size": "16g", - }}, - }, - }, - "kibana": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "size": "2g", - }}, - }}, - "apm": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "size": "1g", - }}, - }}, - "enterprise_search": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "size": "8g", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "empty-deployment-template", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - "apm": []interface{}{map[string]interface{}{}}, - "enterprise_search": []interface{}{map[string]interface{}{}}, - }, - Schema: newSchema(), - }) - - deploymentWithTags := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{ - map[string]interface{}{ - "version": "7.10.1", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }}, - }, - }, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{ - map[string]interface{}{ - "version": "7.10.1", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }}, - }, - }, - "tags": map[string]interface{}{ - "aaa": "bbb", - "owner": "elastic", - "cost-center": "rnd", - }, - }, - Schema: newSchema(), - 
}) - - type args struct { - d *schema.ResourceData - client *api.API - } - tests := []struct { - name string - args args - want *models.DeploymentUpdateRequest - err error - }{ - { - name: "parses the resources", - args: args{ - d: deploymentRD, - client: api.NewMock( - mock.New200Response(ioOptimizedTpl()), - mock.New200Response( - mock.NewStructBody(models.DeploymentGetResponse{ - Healthy: ec.Bool(true), - ID: ec.String(mock.ValidClusterID), - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{{ - ID: ec.String(mock.ValidClusterID), - RefID: ec.String("main-elasticsearch"), - }}, - }, - }), - ), - ), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{ - Observability: &models.DeploymentObservabilitySettings{ - Logging: &models.DeploymentLoggingSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - Metrics: &models.DeploymentMetricsSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - }, - }, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - UserSettingsYaml: `some.setting: value`, - UserSettingsOverrideYaml: `some.setting: value2`, - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - 
}, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "value2", - }, - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - Ml: ec.Bool(false), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{ - SystemSettings: &models.ApmSystemSettings{ - DebugEnabled: ec.Bool(false), - }, - }, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - 
EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with empty declarations", - args: args{ - d: deploymentEmptyRD, - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: 
&models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - 
Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with topology overrides", - args: args{ - d: deploymentOverrideRd, - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: 
ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with empty declarations (Hot Warm)", - args: args{ - d: deploymentHotWarm, - client: api.NewMock(mock.New200Response(hotWarmTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: 
&models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, hotWarmTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - Curation: nil, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.9.2", - Curation: nil, - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-hot-warm-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d2", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(false), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "warm"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: 
ec.String("memory"), - }, - }, - }, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "toplogy change from hot / warm to cross cluster search", - args: args{ - d: deploymentEmptyRDWithTemplateChange, - client: api.NewMock(mock.New200Response(ccsTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{ - Observability: &models.DeploymentObservabilitySettings{}, - }, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ccsTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{}, - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.9.2", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-cross-cluster-search-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - ZoneCount: 1, - InstanceConfigurationID: "aws.ccs.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - 
Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }}, - }, - }, - }, - // The behavior of this change should be: - // * Resets the Elasticsearch topology: from 16g (due to unsetTopology call on DT change). - // * Keeps the kibana toplogy size to 2g even though the topology element has been removed (saved value persists). - // * Removes all other non present resources - { - name: "topology change with sizes not default from io optimized to cross cluster search", - args: args{ - d: deploymentEmptyRDWithTemplateChangeWithDiffSize, - client: api.NewMock(mock.New200Response(ccsTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ccsTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{}, - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.9.2", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-cross-cluster-search-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - ZoneCount: 1, - 
InstanceConfigurationID: "aws.ccs.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - // This field's value is reset. - Value: ec.Int32(1024), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }, - }, - }, - }}, - }, - }, - }, - // The behavior of this change should be: - // * Keeps all topology sizes as they were defined (saved value persists). 
- { - name: "topology change with sizes not default from explicit value to empty", - args: args{ - d: deploymentChangeFromExplicitSizingToEmpty, - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.9.2", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(16384), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - Kibana: []*models.KibanaPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: 
&models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }, - }, - }, - }}, - Apm: []*models.ApmPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }}, - }, - }}, - EnterpriseSearch: []*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }}, - }, - }, - }, - { - name: "does not migrate node_type to node_role on version upgrade that's lower than 7.10.0", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.9.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "node_type_data": "true", - "node_type_ingest": "true", - 
"node_type_master": "true", - "node_type_ml": "false", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.11.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - }}, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.11.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(16384), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - Ml: ec.Bool(false), - }, - TopologyElementControl: 
&models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - { - name: "does not migrate node_type to node_role on version upgrade that's higher than 7.10.0", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.11.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - }}, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - 
DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.11.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(16384), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - Ml: ec.Bool(false), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "migrates node_type to node_role when the existing topology element size is updated", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": 
[]interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "32g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - }}, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(32768), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - { - name: "migrates node_type to node_role when the existing topology element size is 
updated and adds warm tier", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - }, - map[string]interface{}{ - "id": "warm", - "size": "8g", - }, - }, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), 
- }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(16384), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "warm"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "enables autoscaling with the default policies", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": 
"aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "true", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "size": "16g", - }, - map[string]interface{}{ - "id": "warm", - "size": "8g", - }, - }, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(true), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(16384), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, 
- }, - { - ID: "warm", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "warm"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "updates topologies configuration", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.1", - "elasticsearch": []interface{}{ - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "zone_count": 3, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "setting: true", - }}, - }}, - }, - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "master", - "size": "1g", - "zone_count": 3, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "setting: true", - }}, - }}, - }, - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "warm", - "size": "8g", - "zone_count": 3, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "setting: true", - }}, - }}, - }, - }, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{ - 
map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "zone_count": 3, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "setting: false", - }}, - }, - map[string]interface{}{ - "id": "master", - "size": "1g", - "zone_count": 3, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "setting: false", - }}, - }, - map[string]interface{}{ - "id": "warm", - "size": "8g", - "zone_count": 3, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "setting: false", - }}, - }, - }, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - UserSettingsYaml: "setting: false", - }, - ZoneCount: 3, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(16384), - }, - NodeRoles: []string{ - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, 
- TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "master", - Elasticsearch: &models.ElasticsearchConfiguration{ - UserSettingsYaml: "setting: false", - }, - ZoneCount: 3, - InstanceConfigurationID: "aws.master.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - NodeRoles: []string{ - "master", - "remote_cluster_client", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - }, - { - ID: "warm", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "warm"}, - UserSettingsYaml: "setting: false", - }, - ZoneCount: 3, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "parses the resources with tags", - args: args{ - d: deploymentWithTags, - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{Tags: []*models.MetadataItem{ - {Key: ec.String("aaa"), Value: ec.String("bbb")}, - {Key: ec.String("cost-center"), Value: ec.String("rnd")}, - {Key: ec.String("owner"), Value: ec.String("elastic")}, 
- }}, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - { - name: "handles a snapshot_source block adding Strategy: partial", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "version": "7.10.1", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": 
"aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "version": "7.10.1", - "snapshot_source": []interface{}{map[string]interface{}{ - "source_elasticsearch_cluster_id": "8c63b87af9e24ea49b8a4bfe550e5fe9", - }}, - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }}, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - Transient: &models.TransientElasticsearchPlanConfiguration{ - RestoreSnapshot: &models.RestoreSnapshotConfiguration{ - SourceClusterID: "8c63b87af9e24ea49b8a4bfe550e5fe9", - SnapshotName: ec.String("__latest_success__"), - Strategy: "partial", - }, - }, - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - 
"remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - { - name: "handles empty Elasticsearch empty config block", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "version": "7.10.1", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - "config": []interface{}{}, - }}, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: 
map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - { - name: "handles Elasticsearch with topology.config block", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "version": "7.10.1", - "config": []interface{}{}, - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "setting: true", - }}, - }}, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - 
Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - UserSettingsYaml: "setting: true", - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - { - name: "topology change with invalid resources returns an error", - args: args{ - d: deploymentChangeToEmptyDT, - client: api.NewMock(mock.New200Response(emptyTpl())), - }, - err: multierror.NewPrefixed("invalid configuration", - errors.New("kibana specified but deployment template is not configured for it. Use a different template if you wish to add kibana"), - errors.New("apm specified but deployment template is not configured for it. Use a different template if you wish to add apm"), - errors.New("enterprise_search specified but deployment template is not configured for it. 
Use a different template if you wish to add enterprise_search"), - ), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := updateResourceToModel(tt.args.d, tt.args.client) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_ensurePartialSnapshotStrategy(t *testing.T) { - type args struct { - ess []*models.ElasticsearchPayload - } - tests := []struct { - name string - args args - want []*models.ElasticsearchPayload - }{ - { - name: "ignores resources with no transient block", - args: args{ess: []*models.ElasticsearchPayload{{ - Plan: &models.ElasticsearchClusterPlan{}, - }}}, - want: []*models.ElasticsearchPayload{{ - Plan: &models.ElasticsearchClusterPlan{}, - }}, - }, - { - name: "ignores resources with no transient.snapshot block", - args: args{ess: []*models.ElasticsearchPayload{{ - Plan: &models.ElasticsearchClusterPlan{ - Transient: &models.TransientElasticsearchPlanConfiguration{}, - }, - }}}, - want: []*models.ElasticsearchPayload{{ - Plan: &models.ElasticsearchClusterPlan{ - Transient: &models.TransientElasticsearchPlanConfiguration{}, - }, - }}, - }, - { - name: "Sets strategy to partial", - args: args{ess: []*models.ElasticsearchPayload{{ - Plan: &models.ElasticsearchClusterPlan{ - Transient: &models.TransientElasticsearchPlanConfiguration{ - RestoreSnapshot: &models.RestoreSnapshotConfiguration{ - SourceClusterID: "some", - }, - }, - }, - }}}, - want: []*models.ElasticsearchPayload{{ - Plan: &models.ElasticsearchClusterPlan{ - Transient: &models.TransientElasticsearchPlanConfiguration{ - RestoreSnapshot: &models.RestoreSnapshotConfiguration{ - SourceClusterID: "some", - Strategy: "partial", - }, - }, - }, - }}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ensurePartialSnapshotStrategy(tt.args.ess) - assert.Equal(t, tt.want, tt.args.ess) - }) - } -} diff --git 
a/ec/ecresource/deploymentresource/flatteners.go b/ec/ecresource/deploymentresource/flatteners.go deleted file mode 100644 index 91aa467d0..000000000 --- a/ec/ecresource/deploymentresource/flatteners.go +++ /dev/null @@ -1,321 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "errors" - "fmt" - "strings" - - "github.com/blang/semver/v4" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func modelToState(d *schema.ResourceData, res *models.DeploymentGetResponse, remotes models.RemoteResources) error { - if err := d.Set("name", res.Name); err != nil { - return err - } - - if err := d.Set("alias", res.Alias); err != nil { - return err - } - - if res.Metadata != nil { - if err := d.Set("tags", flattenTags(res.Metadata.Tags)); err != nil { - return err - } - } - - if res.Resources != nil { - dt, err := getDeploymentTemplateID(res.Resources) - if err != nil { - return err - } - - if err := d.Set("deployment_template_id", dt); err != nil { - return err - } - - if err := d.Set("region", getRegion(res.Resources)); err != nil { - return err - } - - // We're reconciling the version and storing the lowest version of any - // of the deployment resources. This ensures that if an upgrade fails, - // the state version will be lower than the desired version, making - // retries possible. Once more resource types are added, the function - // needs to be modified to check those as well. - version, err := getLowestVersion(res.Resources) - if err != nil { - // This code path is highly unlikely, but we're bubbling up the - // error in case one of the versions isn't parseable by semver. 
- return fmt.Errorf("failed reading deployment: %w", err) - } - if err := d.Set("version", version); err != nil { - return err - } - - esFlattened, err := flattenEsResources(res.Resources.Elasticsearch, *res.Name, remotes) - if err != nil { - return err - } - if err := d.Set("elasticsearch", esFlattened); err != nil { - return err - } - - kibanaFlattened := flattenKibanaResources(res.Resources.Kibana, *res.Name) - if len(kibanaFlattened) > 0 { - if err := d.Set("kibana", kibanaFlattened); err != nil { - return err - } - } - - apmFlattened := flattenApmResources(res.Resources.Apm, *res.Name) - if len(apmFlattened) > 0 { - if err := d.Set("apm", apmFlattened); err != nil { - return err - } - } - - integrationsServerFlattened := flattenIntegrationsServerResources(res.Resources.IntegrationsServer, *res.Name) - if len(integrationsServerFlattened) > 0 { - if err := d.Set("integrations_server", integrationsServerFlattened); err != nil { - return err - } - } - - enterpriseSearchFlattened := flattenEssResources(res.Resources.EnterpriseSearch, *res.Name) - if len(enterpriseSearchFlattened) > 0 { - if err := d.Set("enterprise_search", enterpriseSearchFlattened); err != nil { - return err - } - } - - if settings := flattenTrafficFiltering(res.Settings); settings != nil { - if err := d.Set("traffic_filter", settings); err != nil { - return err - } - } - - if observability := flattenObservability(res.Settings); len(observability) > 0 { - if err := d.Set("observability", observability); err != nil { - return err - } - } - } - - return nil -} - -func getDeploymentTemplateID(res *models.DeploymentResources) (string, error) { - var deploymentTemplateID string - var foundTemplates []string - for _, esRes := range res.Elasticsearch { - if util.IsCurrentEsPlanEmpty(esRes) { - continue - } - - var emptyDT = esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate == nil - if emptyDT { - continue - } - - if deploymentTemplateID == "" { - deploymentTemplateID = 
*esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID - } - - foundTemplates = append(foundTemplates, - *esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID, - ) - } - - if deploymentTemplateID == "" { - return "", errors.New("failed to obtain the deployment template id") - } - - if len(foundTemplates) > 1 { - return "", fmt.Errorf( - "there are more than 1 deployment templates specified on the deployment: \"%s\"", strings.Join(foundTemplates, ", "), - ) - } - - return deploymentTemplateID, nil -} - -// parseCredentials parses the Create or Update response Resources populating -// credential settings in the Terraform state if the keys are found, currently -// populates the following credentials in plain text: -// * Elasticsearch username and Password -func parseCredentials(d *schema.ResourceData, resources []*models.DeploymentResource) error { - var merr = multierror.NewPrefixed("failed parsing credentials") - for _, res := range resources { - // Parse ES credentials - if creds := res.Credentials; creds != nil { - if creds.Username != nil && *creds.Username != "" { - if err := d.Set("elasticsearch_username", *creds.Username); err != nil { - merr = merr.Append(err) - } - } - - if creds.Password != nil && *creds.Password != "" { - if err := d.Set("elasticsearch_password", *creds.Password); err != nil { - merr = merr.Append(err) - } - } - } - - // Parse APM secret_token - if res.SecretToken != "" { - if err := d.Set("apm_secret_token", res.SecretToken); err != nil { - merr = merr.Append(err) - } - } - } - - return merr.ErrorOrNil() -} - -func getRegion(res *models.DeploymentResources) (region string) { - for _, r := range res.Elasticsearch { - if r.Region != nil && *r.Region != "" { - return *r.Region - } - } - - return region -} - -func getLowestVersion(res *models.DeploymentResources) (string, error) { - // We're starting off with a very high version so it can be replaced. 
- replaceVersion := `99.99.99` - version := semver.MustParse(replaceVersion) - for _, r := range res.Elasticsearch { - if !util.IsCurrentEsPlanEmpty(r) { - v := r.Info.PlanInfo.Current.Plan.Elasticsearch.Version - if err := swapLowerVersion(&version, v); err != nil && !isEsResourceStopped(r) { - return "", fmt.Errorf("elasticsearch version '%s' is not semver compliant: %w", v, err) - } - } - } - - for _, r := range res.Kibana { - if !util.IsCurrentKibanaPlanEmpty(r) { - v := r.Info.PlanInfo.Current.Plan.Kibana.Version - if err := swapLowerVersion(&version, v); err != nil && !isKibanaResourceStopped(r) { - return version.String(), fmt.Errorf("kibana version '%s' is not semver compliant: %w", v, err) - } - } - } - - for _, r := range res.Apm { - if !util.IsCurrentApmPlanEmpty(r) { - v := r.Info.PlanInfo.Current.Plan.Apm.Version - if err := swapLowerVersion(&version, v); err != nil && !isApmResourceStopped(r) { - return version.String(), fmt.Errorf("apm version '%s' is not semver compliant: %w", v, err) - } - } - } - - for _, r := range res.IntegrationsServer { - if !util.IsCurrentIntegrationsServerPlanEmpty(r) { - v := r.Info.PlanInfo.Current.Plan.IntegrationsServer.Version - if err := swapLowerVersion(&version, v); err != nil && !isIntegrationsServerResourceStopped(r) { - return version.String(), fmt.Errorf("integrations_server version '%s' is not semver compliant: %w", v, err) - } - } - } - - for _, r := range res.EnterpriseSearch { - if !util.IsCurrentEssPlanEmpty(r) { - v := r.Info.PlanInfo.Current.Plan.EnterpriseSearch.Version - if err := swapLowerVersion(&version, v); err != nil && !isEssResourceStopped(r) { - return version.String(), fmt.Errorf("enterprise search version '%s' is not semver compliant: %w", v, err) - } - } - } - - if version.String() != replaceVersion { - return version.String(), nil - } - return "", errors.New("Unable to determine the lowest version for any the deployment components") -} - -func swapLowerVersion(version *semver.Version, comp 
string) error { - if comp == "" { - return nil - } - - v, err := semver.Parse(comp) - if err != nil { - return err - } - if v.LT(*version) { - *version = v - } - return nil -} - -func hasRunningResources(res *models.DeploymentGetResponse) bool { - var hasRunning bool - if res.Resources != nil { - for _, r := range res.Resources.Elasticsearch { - if !isEsResourceStopped(r) { - hasRunning = true - } - } - for _, r := range res.Resources.Kibana { - if !isKibanaResourceStopped(r) { - hasRunning = true - } - } - for _, r := range res.Resources.Apm { - if !isApmResourceStopped(r) { - hasRunning = true - } - } - for _, r := range res.Resources.EnterpriseSearch { - if !isEssResourceStopped(r) { - hasRunning = true - } - } - for _, r := range res.Resources.IntegrationsServer { - if !isIntegrationsServerResourceStopped(r) { - hasRunning = true - } - } - } - return hasRunning -} - -func flattenTags(tags []*models.MetadataItem) map[string]interface{} { - if len(tags) == 0 { - return nil - } - - result := make(map[string]interface{}, len(tags)) - for _, tag := range tags { - result[*tag.Key] = *tag.Value - } - - return result -} diff --git a/ec/ecresource/deploymentresource/flatteners_test.go b/ec/ecresource/deploymentresource/flatteners_test.go deleted file mode 100644 index b268cd67a..000000000 --- a/ec/ecresource/deploymentresource/flatteners_test.go +++ /dev/null @@ -1,1787 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "errors" - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_modelToState(t *testing.T) { - deploymentSchemaArg := schema.TestResourceDataRaw(t, newSchema(), nil) - deploymentSchemaArg.SetId(mock.ValidClusterID) - - deploymentLowerVersionSchemaArg := schema.TestResourceDataRaw(t, newSchema(), nil) - deploymentLowerVersionSchemaArg.SetId(mock.ValidClusterID) - - wantDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - - azureIOOptimizedRes := openDeploymentGet(t, "testdata/deployment-azure-io-optimized.json") - azureIOOptimizedRD := schema.TestResourceDataRaw(t, newSchema(), nil) - azureIOOptimizedRD.SetId(mock.ValidClusterID) - wantAzureIOOptimizedDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "alias": "my-deployment", - "deployment_template_id": "azure-io-optimized", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "azure-eastus2", - "version": "7.9.2", - "apm": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "azure-eastus2", - 
"resource_id": "1235d8c911b74dd6a03c2a7b37fd68ab", - "version": "7.9.2", - "http_endpoint": "http://1235d8c911b74dd6a03c2a7b37fd68ab.apm.eastus2.azure.elastic-cloud.com:9200", - "https_endpoint": "https://1235d8c911b74dd6a03c2a7b37fd68ab.apm.eastus2.azure.elastic-cloud.com:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "azure.apm.e32sv3", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "up2d:somecloudID", - "http_endpoint": "http://1238f19957874af69306787dca662154.eastus2.azure.elastic-cloud.com:9200", - "https_endpoint": "https://1238f19957874af69306787dca662154.eastus2.azure.elastic-cloud.com:9243", - "ref_id": "main-elasticsearch", - "region": "azure-eastus2", - "resource_id": "1238f19957874af69306787dca662154", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "azure.data.highio.l32sv2", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "4g", - "size_resource": "memory", - "zone_count": 2, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "azure-eastus2", - "resource_id": "1235cd4a4c7f464bbcfd795f3638b769", - "version": "7.9.2", - "http_endpoint": "http://1235cd4a4c7f464bbcfd795f3638b769.eastus2.azure.elastic-cloud.com:9200", - "https_endpoint": "https://1235cd4a4c7f464bbcfd795f3638b769.eastus2.azure.elastic-cloud.com:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "azure.kibana.e32sv3", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - - awsIOOptimizedRes := openDeploymentGet(t, "testdata/deployment-aws-io-optimized.json") - awsIOOptimizedRD := 
schema.TestResourceDataRaw(t, newSchema(), nil) - awsIOOptimizedRD.SetId(mock.ValidClusterID) - wantAwsIOOptimizedDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "alias": "my-deployment", - "deployment_template_id": "aws-io-optimized-v2", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "aws-eu-central-1", - "version": "7.9.2", - "apm": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "aws-eu-central-1", - "resource_id": "12328579b3bf40c8b58c1a0ed5a4bd8b", - "version": "7.9.2", - "http_endpoint": "http://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:80", - "https_endpoint": "https://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "up2d:someCloudID", - "http_endpoint": "http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200", - "https_endpoint": "https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243", - "ref_id": "main-elasticsearch", - "region": "aws-eu-central-1", - "resource_id": "1239f7ee7196439ba2d105319ac5eba7", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "8g", - "size_resource": "memory", - "zone_count": 2, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "aws-eu-central-1", - "resource_id": 
"123dcfda06254ca789eb287e8b73ff4c", - "version": "7.9.2", - "http_endpoint": "http://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9200", - "https_endpoint": "https://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - - awsIOOptimizedExtensionRD := schema.TestResourceDataRaw(t, newSchema(), nil) - awsIOOptimizedExtensionRD.SetId(mock.ValidClusterID) - - awsIOOptimizedTagsRes := openDeploymentGet(t, "testdata/deployment-aws-io-optimized-tags.json") - awsIOOptimizedTagsRD := schema.TestResourceDataRaw(t, newSchema(), nil) - awsIOOptimizedTagsRD.SetId(mock.ValidClusterID) - wantAwsIOOptimizedDeploymentTags := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "alias": "my-deployment", - "deployment_template_id": "aws-io-optimized-v2", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "aws-eu-central-1", - "tags": map[string]interface{}{ - "aaa": "bbb", - "cost": "rnd", - "owner": "elastic", - }, - "version": "7.9.2", - "apm": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "aws-eu-central-1", - "resource_id": "12328579b3bf40c8b58c1a0ed5a4bd8b", - "version": "7.9.2", - "http_endpoint": "http://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:80", - "https_endpoint": "https://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "up2d:someCloudID", - "http_endpoint": 
"http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200", - "https_endpoint": "https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243", - "ref_id": "main-elasticsearch", - "region": "aws-eu-central-1", - "resource_id": "1239f7ee7196439ba2d105319ac5eba7", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "8g", - "size_resource": "memory", - "zone_count": 2, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "aws-eu-central-1", - "resource_id": "123dcfda06254ca789eb287e8b73ff4c", - "version": "7.9.2", - "http_endpoint": "http://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9200", - "https_endpoint": "https://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - - gcpIOOptimizedRes := openDeploymentGet(t, "testdata/deployment-gcp-io-optimized.json") - gcpIOOptimizedRD := schema.TestResourceDataRaw(t, newSchema(), nil) - gcpIOOptimizedRD.SetId(mock.ValidClusterID) - wantGcpIOOptimizedDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "alias": "my-deployment", - "deployment_template_id": "gcp-io-optimized", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "gcp-asia-east1", - "version": "7.9.2", - "apm": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "gcp-asia-east1", - "resource_id": 
"12307c6c304949b8a9f3682b80900879", - "version": "7.9.2", - "http_endpoint": "http://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:80", - "https_endpoint": "https://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.apm.1", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "up2d:someCloudID", - "http_endpoint": "http://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9200", - "https_endpoint": "https://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9243", - "ref_id": "main-elasticsearch", - "region": "gcp-asia-east1", - "resource_id": "123695e76d914005bf90b717e668ad4b", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "gcp.data.highio.1", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "8g", - "size_resource": "memory", - "zone_count": 2, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "gcp-asia-east1", - "resource_id": "12365046781e4d729a07df64fe67c8c6", - "version": "7.9.2", - "http_endpoint": "http://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9200", - "https_endpoint": "https://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.kibana.1", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - - gcpIOOptimizedAutoscaleRes := openDeploymentGet(t, "testdata/deployment-gcp-io-optimized-autoscale.json") - gcpIOOptimizedAutoscaleRD := 
schema.TestResourceDataRaw(t, newSchema(), nil) - gcpIOOptimizedAutoscaleRD.SetId(mock.ValidClusterID) - - gcpHotWarmRes := openDeploymentGet(t, "testdata/deployment-gcp-hot-warm.json") - gcpHotWarmRD := schema.TestResourceDataRaw(t, newSchema(), nil) - gcpHotWarmRD.SetId(mock.ValidClusterID) - wantGcpHotWarmDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "deployment_template_id": "gcp-hot-warm", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d-hot-warm", - "region": "gcp-us-central1", - "version": "7.9.2", - "apm": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "gcp-us-central1", - "resource_id": "1234b68b0b9347f1b49b1e01b33bf4a4", - "version": "7.9.2", - "http_endpoint": "http://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:80", - "https_endpoint": "https://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.apm.1", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "up2d-hot-warm:someCloudID", - "http_endpoint": "http://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9200", - "https_endpoint": "https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243", - "ref_id": "main-elasticsearch", - "region": "gcp-us-central1", - "resource_id": "123e837db6ee4391bb74887be35a7a91", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "gcp.data.highio.1", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "4g", - "size_resource": "memory", - "zone_count": 2, - }, - map[string]interface{}{ - "id": "warm", - 
"instance_configuration_id": "gcp.data.highstorage.1", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "false", - "node_type_ml": "false", - "size": "4g", - "size_resource": "memory", - "zone_count": 2, - }, - }, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "gcp-us-central1", - "resource_id": "12372cc60d284e7e96b95ad14727c23d", - "version": "7.9.2", - "http_endpoint": "http://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9200", - "https_endpoint": "https://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.kibana.1", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - _ = wantGcpHotWarmDeployment.Set("alias", "") - - wantGcpIOOptAutoscale := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "alias": "", - "deployment_template_id": "gcp-io-optimized", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "gcp-asia-east1", - "version": "7.9.2", - "apm": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "gcp-asia-east1", - "resource_id": "12307c6c304949b8a9f3682b80900879", - "version": "7.9.2", - "http_endpoint": "http://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:80", - "https_endpoint": "https://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.apm.1", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "true", - "cloud_id": "up2d:someCloudID", - "http_endpoint": 
"http://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9200", - "https_endpoint": "https://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9243", - "ref_id": "main-elasticsearch", - "region": "gcp-asia-east1", - "resource_id": "123695e76d914005bf90b717e668ad4b", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "gcp.data.highio.1", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "8g", - "size_resource": "memory", - "zone_count": 2, - "autoscaling": []interface{}{map[string]interface{}{ - "max_size": "29g", - "max_size_resource": "memory", - "policy_override_json": `{"proactive_storage":{"forecast_window":"3 h"}}`, - }}, - }, - map[string]interface{}{ - "id": "ml", - "instance_configuration_id": "gcp.ml.1", - "node_type_data": "false", - "node_type_ingest": "false", - "node_type_master": "false", - "node_type_ml": "true", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - "autoscaling": []interface{}{map[string]interface{}{ - "max_size": "30g", - "max_size_resource": "memory", - - "min_size": "1g", - "min_size_resource": "memory", - }}, - }, - }, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "gcp-asia-east1", - "resource_id": "12365046781e4d729a07df64fe67c8c6", - "version": "7.9.2", - "http_endpoint": "http://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9200", - "https_endpoint": "https://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.kibana.1", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - _ = wantGcpIOOptAutoscale.Set("alias", "") - - gcpHotWarmNodeRolesRes := openDeploymentGet(t, 
"testdata/deployment-gcp-hot-warm-node_roles.json") - gcpHotWarmNodeRolesRD := schema.TestResourceDataRaw(t, newSchema(), nil) - gcpHotWarmNodeRolesRD.SetId(mock.ValidClusterID) - wantGcpHotWarmNodeRolesDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "deployment_template_id": "gcp-hot-warm", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d-hot-warm", - "region": "gcp-us-central1", - "version": "7.11.0", - "apm": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "gcp-us-central1", - "resource_id": "1234b68b0b9347f1b49b1e01b33bf4a4", - "version": "7.11.0", - "http_endpoint": "http://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:80", - "https_endpoint": "https://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.apm.1", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "up2d-hot-warm:someCloudID", - "http_endpoint": "http://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9200", - "https_endpoint": "https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243", - "ref_id": "main-elasticsearch", - "region": "gcp-us-central1", - "resource_id": "123e837db6ee4391bb74887be35a7a91", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "gcp.data.highio.1", - "size": "4g", - "size_resource": "memory", - "zone_count": 2, - "node_roles": []interface{}{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - }, - map[string]interface{}{ - "id": "warm", - "instance_configuration_id": "gcp.data.highstorage.1", - "size": "4g", - "size_resource": 
"memory", - "zone_count": 2, - "node_roles": []interface{}{ - "data_warm", - "remote_cluster_client", - }, - }, - }, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "gcp-us-central1", - "resource_id": "12372cc60d284e7e96b95ad14727c23d", - "version": "7.11.0", - "http_endpoint": "http://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9200", - "https_endpoint": "https://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.kibana.1", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - _ = wantGcpHotWarmNodeRolesDeployment.Set("alias", "") - - awsCCSRes := openDeploymentGet(t, "testdata/deployment-aws-ccs.json") - awsCCSRD := schema.TestResourceDataRaw(t, newSchema(), nil) - awsCCSRD.SetId(mock.ValidClusterID) - wantAWSCCSDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "deployment_template_id": "aws-cross-cluster-search-v2", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "ccs", - "region": "eu-west-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "ccs:someCloudID", - "http_endpoint": "http://1230b3ae633b4f51a432d50971f7f1c1.eu-west-1.aws.found.io:9200", - "https_endpoint": "https://1230b3ae633b4f51a432d50971f7f1c1.eu-west-1.aws.found.io:9243", - "ref_id": "main-elasticsearch", - "region": "eu-west-1", - "resource_id": "1230b3ae633b4f51a432d50971f7f1c1", - "remote_cluster": []interface{}{ - map[string]interface{}{ - "alias": "alias", - "deployment_id": "someid", - "ref_id": "main-elasticsearch", - "skip_unavailable": true, - }, - map[string]interface{}{ - "deployment_id": "some other id", - "ref_id": "main-elasticsearch", - }, - }, - "topology": 
[]interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "aws.ccs.r5d", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "eu-west-1", - "resource_id": "12317425e9e14491b74ee043db3402eb", - "version": "7.9.2", - "http_endpoint": "http://12317425e9e14491b74ee043db3402eb.eu-west-1.aws.found.io:9200", - "https_endpoint": "https://12317425e9e14491b74ee043db3402eb.eu-west-1.aws.found.io:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - _ = wantAWSCCSDeployment.Set("alias", "") - argCCSRemotes := models.RemoteResources{Resources: []*models.RemoteResourceRef{ - { - Alias: ec.String("alias"), - DeploymentID: ec.String("someid"), - ElasticsearchRefID: ec.String("main-elasticsearch"), - SkipUnavailable: ec.Bool(true), - }, - { - DeploymentID: ec.String("some other id"), - ElasticsearchRefID: ec.String("main-elasticsearch"), - }, - }} - - type args struct { - d *schema.ResourceData - res *models.DeploymentGetResponse - remotes models.RemoteResources - } - tests := []struct { - name string - args args - want *schema.ResourceData - err error - }{ - { - name: "flattens deployment resources", - want: wantDeployment, - args: args{ - d: deploymentSchemaArg, - res: &models.DeploymentGetResponse{ - Alias: "my-deployment", - Name: ec.String("my_deployment_name"), - Settings: &models.DeploymentSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - Observability: &models.DeploymentObservabilitySettings{ - Logging: 
&models.DeploymentLoggingSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - Metrics: &models.DeploymentMetricsSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - }, - }, - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Info: &models.ElasticsearchClusterInfo{ - Status: ec.String("started"), - ClusterID: &mock.ValidClusterID, - ClusterName: ec.String("some-name"), - Region: "us-east-1", - ElasticsearchMonitoringInfo: &models.ElasticsearchMonitoringInfo{ - DestinationClusterIds: []string{"some"}, - }, - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - UserSettingsYaml: `some.setting: value`, - UserSettingsOverrideYaml: `some.setting: value2`, - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "value2", - }, - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - Ml: ec.Bool(false), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - 
Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }}, - }, - }, - }, - }, - }, - }, - Kibana: []*models.KibanaResourceInfo{ - { - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Info: &models.KibanaClusterInfo{ - Status: ec.String("started"), - ClusterID: &mock.ValidClusterID, - ClusterName: ec.String("some-kibana-name"), - Region: "us-east-1", - PlanInfo: &models.KibanaClusterPlansInfo{ - Current: &models.KibanaClusterPlanInfo{ - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmResourceInfo{{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Info: &models.ApmInfo{ - Status: ec.String("started"), - ID: &mock.ValidClusterID, - Name: ec.String("some-apm-name"), - Region: "us-east-1", - PlanInfo: &models.ApmPlansInfo{ - Current: &models.ApmPlanInfo{ - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{ - Version: "7.7.0", - SystemSettings: &models.ApmSystemSettings{ - DebugEnabled: ec.Bool(false), - }, - }, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - }, - }}, - EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ - { - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Info: &models.EnterpriseSearchInfo{ - Status: ec.String("started"), - ID: &mock.ValidClusterID, - Name: 
ec.String("some-enterprise_search-name"), - Region: "us-east-1", - PlanInfo: &models.EnterpriseSearchPlansInfo{ - Current: &models.EnterpriseSearchPlanInfo{ - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "sets the global version to the lesser version", - args: args{ - d: deploymentLowerVersionSchemaArg, - res: &models.DeploymentGetResponse{ - Alias: "my-deployment", - Name: ec.String("my_deployment_name"), - Settings: &models.DeploymentSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - }, - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Info: &models.ElasticsearchClusterInfo{ - Status: ec.String("started"), - ClusterID: &mock.ValidClusterID, - ClusterName: ec.String("some-name"), - Region: "us-east-1", - ElasticsearchMonitoringInfo: &models.ElasticsearchMonitoringInfo{ - DestinationClusterIds: []string{"some"}, - }, - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - UserSettingsYaml: `some.setting: value`, - UserSettingsOverrideYaml: `some.setting: value2`, - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": 
"value2", - }, - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - Ml: ec.Bool(false), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }}, - }, - }, - }, - }, - }, - }, - Kibana: []*models.KibanaResourceInfo{ - { - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Info: &models.KibanaClusterInfo{ - Status: ec.String("started"), - ClusterID: &mock.ValidClusterID, - ClusterName: ec.String("some-kibana-name"), - Region: "us-east-1", - PlanInfo: &models.KibanaClusterPlansInfo{ - Current: &models.KibanaClusterPlanInfo{ - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{ - Version: "7.6.2", - }, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - want: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "alias": "my-deployment", - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.6.2", - "elasticsearch": []interface{}{map[string]interface{}{ - "ref_id": 
"main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "us-east-1", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - }}, - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "size": "2g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "zone_count": 1, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "us-east-1", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "zone_count": 1, - }, - }, - }}, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - }, - }), - }, - { - name: "flattens an azure plan (io-optimized)", - args: args{d: azureIOOptimizedRD, res: azureIOOptimizedRes}, - want: wantAzureIOOptimizedDeployment, - }, - { - name: "flattens an aws plan (io-optimized)", - args: args{d: awsIOOptimizedRD, res: awsIOOptimizedRes}, - want: wantAwsIOOptimizedDeployment, - }, - { - name: "flattens an aws plan with extensions (io-optimized)", - args: args{ - d: awsIOOptimizedExtensionRD, - res: openDeploymentGet(t, "testdata/deployment-aws-io-optimized-extension.json"), - }, - want: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "alias": "my-deployment", - "deployment_template_id": "aws-io-optimized-v2", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "aws-eu-central-1", - "version": "7.9.2", - "apm": []interface{}{map[string]interface{}{ - 
"elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "aws-eu-central-1", - "resource_id": "12328579b3bf40c8b58c1a0ed5a4bd8b", - "version": "7.9.2", - "http_endpoint": "http://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:80", - "https_endpoint": "https://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "up2d:someCloudID", - "extension": []interface{}{ - map[string]interface{}{ - "name": "custom-bundle", - "version": "7.9.2", - "url": "http://12345", - "type": "bundle", - }, - map[string]interface{}{ - "name": "custom-bundle2", - "version": "7.9.2", - "url": "http://123456", - "type": "bundle", - }, - map[string]interface{}{ - "name": "custom-plugin", - "version": "7.9.2", - "url": "http://12345", - "type": "plugin", - }, - map[string]interface{}{ - "name": "custom-plugin2", - "version": "7.9.2", - "url": "http://123456", - "type": "plugin", - }, - }, - "http_endpoint": "http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200", - "https_endpoint": "https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243", - "ref_id": "main-elasticsearch", - "region": "aws-eu-central-1", - "resource_id": "1239f7ee7196439ba2d105319ac5eba7", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "8g", - "size_resource": "memory", - "zone_count": 2, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": 
"aws-eu-central-1", - "resource_id": "123dcfda06254ca789eb287e8b73ff4c", - "version": "7.9.2", - "http_endpoint": "http://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9200", - "https_endpoint": "https://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }), - }, - { - name: "flattens an aws plan with trusts", - args: args{ - d: newDeploymentRD(t, "123b7b540dfc967a7a649c18e2fce4ed", nil), - res: &models.DeploymentGetResponse{ - ID: ec.String("123b7b540dfc967a7a649c18e2fce4ed"), - Alias: "OH", - Name: ec.String("up2d"), - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{{ - RefID: ec.String("main-elasticsearch"), - Region: ec.String("aws-eu-central-1"), - Info: &models.ElasticsearchClusterInfo{ - Status: ec.String("running"), - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.13.1", - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Size: &models.TopologySize{ - Value: ec.Int32(4096), - Resource: ec.String("memory"), - }, - }}, - }, - }, - }, - Settings: &models.ElasticsearchClusterSettings{ - Trust: &models.ElasticsearchClusterTrustSettings{ - Accounts: []*models.AccountTrustRelationship{ - { - AccountID: ec.String("ANID"), - TrustAll: ec.Bool(true), - }, - { - AccountID: ec.String("anotherID"), - TrustAll: ec.Bool(false), - TrustAllowlist: []string{ - "abc", "dfg", "hij", - }, - }, - }, - External: []*models.ExternalTrustRelationship{ - { - TrustRelationshipID: 
ec.String("external_id"), - TrustAll: ec.Bool(true), - }, - { - TrustRelationshipID: ec.String("another_external_id"), - TrustAll: ec.Bool(false), - TrustAllowlist: []string{ - "abc", "dfg", - }, - }, - }, - }, - }, - }, - }}, - }, - }, - }, - want: util.NewResourceData(t, util.ResDataParams{ - ID: "123b7b540dfc967a7a649c18e2fce4ed", - State: map[string]interface{}{ - "alias": "OH", - "deployment_template_id": "aws-io-optimized-v2", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "aws-eu-central-1", - "version": "7.13.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "region": "aws-eu-central-1", - "ref_id": "main-elasticsearch", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "4g", - "size_resource": "memory", - }}, - "trust_account": []interface{}{ - map[string]interface{}{ - "account_id": "ANID", - "trust_all": "true", - }, - map[string]interface{}{ - "account_id": "anotherID", - "trust_all": "false", - "trust_allowlist": []interface{}{ - "abc", "hij", "dfg", - }, - }, - }, - "trust_external": []interface{}{ - map[string]interface{}{ - "relationship_id": "another_external_id", - "trust_all": "false", - "trust_allowlist": []interface{}{ - "abc", "dfg", - }, - }, - map[string]interface{}{ - "relationship_id": "external_id", - "trust_all": "true", - }, - }, - }}, - }, - Schema: newSchema(), - }), - }, - { - name: "flattens an aws plan with topology.config set", - args: args{ - d: newDeploymentRD(t, "123b7b540dfc967a7a649c18e2fce4ed", nil), - res: &models.DeploymentGetResponse{ - ID: ec.String("123b7b540dfc967a7a649c18e2fce4ed"), - Alias: "OH", - Name: ec.String("up2d"), - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{{ - RefID: ec.String("main-elasticsearch"), - Region: ec.String("aws-eu-central-1"), - Info: &models.ElasticsearchClusterInfo{ - Status: ec.String("running"), - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: 
&models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.13.1", - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Size: &models.TopologySize{ - Value: ec.Int32(4096), - Resource: ec.String("memory"), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - UserSettingsYaml: "a.setting: true", - }, - }}, - }, - }, - }, - Settings: &models.ElasticsearchClusterSettings{}, - }, - }}, - }, - }, - }, - want: util.NewResourceData(t, util.ResDataParams{ - ID: "123b7b540dfc967a7a649c18e2fce4ed", - State: map[string]interface{}{ - "alias": "OH", - "deployment_template_id": "aws-io-optimized-v2", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "aws-eu-central-1", - "version": "7.13.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "region": "aws-eu-central-1", - "ref_id": "main-elasticsearch", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "4g", - "size_resource": "memory", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "a.setting: true", - }}, - }}, - }}, - }, - Schema: newSchema(), - }), - }, - { - name: "flattens an plan with config.docker_image set", - args: args{ - d: newDeploymentRD(t, "123b7b540dfc967a7a649c18e2fce4ed", nil), - res: &models.DeploymentGetResponse{ - ID: ec.String("123b7b540dfc967a7a649c18e2fce4ed"), - Alias: "OH", - Name: ec.String("up2d"), - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{{ - RefID: ec.String("main-elasticsearch"), - Region: ec.String("aws-eu-central-1"), - Info: &models.ElasticsearchClusterInfo{ - Status: ec.String("running"), - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: 
&models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.14.1", - DockerImage: "docker.elastic.com/elasticsearch/cloud:7.14.1-hash", - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Size: &models.TopologySize{ - Value: ec.Int32(4096), - Resource: ec.String("memory"), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - UserSettingsYaml: "a.setting: true", - }, - ZoneCount: 1, - }}, - }, - }, - }, - Settings: &models.ElasticsearchClusterSettings{}, - }, - }}, - Apm: []*models.ApmResourceInfo{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - RefID: ec.String("main-apm"), - Region: ec.String("aws-eu-central-1"), - Info: &models.ApmInfo{ - Status: ec.String("running"), - PlanInfo: &models.ApmPlansInfo{Current: &models.ApmPlanInfo{ - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{ - Version: "7.14.1", - DockerImage: "docker.elastic.com/apm/cloud:7.14.1-hash", - SystemSettings: &models.ApmSystemSettings{ - DebugEnabled: ec.Bool(false), - }, - }, - ClusterTopology: []*models.ApmTopologyElement{{ - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - ZoneCount: 1, - }}, - }, - }}, - }, - }}, - Kibana: []*models.KibanaResourceInfo{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - RefID: ec.String("main-kibana"), - Region: ec.String("aws-eu-central-1"), - Info: &models.KibanaClusterInfo{ - Status: ec.String("running"), - PlanInfo: &models.KibanaClusterPlansInfo{Current: &models.KibanaClusterPlanInfo{ - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{ - Version: "7.14.1", - DockerImage: "docker.elastic.com/kibana/cloud:7.14.1-hash", - }, - ClusterTopology: []*models.KibanaClusterTopologyElement{{ - InstanceConfigurationID: 
"aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - ZoneCount: 1, - }}, - }, - }}, - }, - }}, - EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - RefID: ec.String("main-enterprise_search"), - Region: ec.String("aws-eu-central-1"), - Info: &models.EnterpriseSearchInfo{ - Status: ec.String("running"), - PlanInfo: &models.EnterpriseSearchPlansInfo{Current: &models.EnterpriseSearchPlanInfo{ - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.14.1", - DockerImage: "docker.elastic.com/enterprise_search/cloud:7.14.1-hash", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - ZoneCount: 2, - }}, - }, - }}, - }, - }}, - }, - }, - }, - want: util.NewResourceData(t, util.ResDataParams{ - ID: "123b7b540dfc967a7a649c18e2fce4ed", - State: map[string]interface{}{ - "alias": "OH", - "deployment_template_id": "aws-io-optimized-v2", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "aws-eu-central-1", - "version": "7.14.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "region": "aws-eu-central-1", - "ref_id": "main-elasticsearch", - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/elasticsearch/cloud:7.14.1-hash", - }}, - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "4g", - "size_resource": "memory", - "zone_count": 1, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "a.setting: true", - }}, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "region": 
"aws-eu-central-1", - "ref_id": "main-kibana", - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/kibana/cloud:7.14.1-hash", - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "apm": []interface{}{map[string]interface{}{ - "region": "aws-eu-central-1", - "ref_id": "main-apm", - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/apm/cloud:7.14.1-hash", - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "enterprise_search": []interface{}{map[string]interface{}{ - "region": "aws-eu-central-1", - "ref_id": "main-enterprise_search", - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/enterprise_search/cloud:7.14.1-hash", - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.m5d", - "size": "2g", - "size_resource": "memory", - "zone_count": 2, - "node_type_appserver": "true", - "node_type_connector": "true", - "node_type_worker": "true", - }}, - }}, - }, - Schema: newSchema(), - }), - }, - { - name: "flattens an aws plan (io-optimized) with tags", - args: args{d: awsIOOptimizedTagsRD, res: awsIOOptimizedTagsRes}, - want: wantAwsIOOptimizedDeploymentTags, - }, - { - name: "flattens a gcp plan (io-optimized)", - args: args{d: gcpIOOptimizedRD, res: gcpIOOptimizedRes}, - want: wantGcpIOOptimizedDeployment, - }, - { - name: "flattens a gcp plan with autoscale set (io-optimized)", - args: args{d: gcpIOOptimizedRD, res: gcpIOOptimizedAutoscaleRes}, - want: wantGcpIOOptAutoscale, - }, - { - name: "flattens a gcp plan (hot-warm)", - args: args{d: gcpHotWarmRD, res: gcpHotWarmRes}, - want: wantGcpHotWarmDeployment, - }, - { - name: "flattens a gcp plan 
(hot-warm) with node_roles", - args: args{d: gcpHotWarmNodeRolesRD, res: gcpHotWarmNodeRolesRes}, - want: wantGcpHotWarmNodeRolesDeployment, - }, - { - name: "flattens an aws plan (Cross Cluster Search)", - args: args{d: awsCCSRD, res: awsCCSRes, remotes: argCCSRemotes}, - want: wantAWSCCSDeployment, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := modelToState(tt.args.d, tt.args.res, tt.args.remotes) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - - var wantState interface{} - if tt.want != nil { - wantState = tt.want.State().Attributes - } - - assert.Equal(t, wantState, tt.args.d.State().Attributes) - }) - } -} - -func Test_getDeploymentTemplateID(t *testing.T) { - type args struct { - res *models.DeploymentResources - } - tests := []struct { - name string - args args - want string - err error - }{ - { - name: "empty resources returns an error", - args: args{res: &models.DeploymentResources{}}, - err: errors.New("failed to obtain the deployment template id"), - }, - { - name: "single empty current plan returns error", - args: args{res: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Pending: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized"), - }, - }, - }, - }, - }, - }, - }, - }}, - err: errors.New("failed to obtain the deployment template id"), - }, - { - name: "multiple deployment templates returns an error", - args: args{res: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - 
DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("someid"), - }, - }, - }, - }, - }, - }, - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("someotherid"), - }, - }, - }, - }, - }, - }, - }, - }}, - err: errors.New("there are more than 1 deployment templates specified on the deployment: \"someid, someotherid\""), - }, - { - name: "single deployment template returns it", - args: args{res: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized"), - }, - }, - }, - }, - }, - }, - }, - }}, - want: "aws-io-optimized", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := getDeploymentTemplateID(tt.args.res) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_parseCredentials(t *testing.T) { - deploymentRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - - rawData := newSampleLegacyDeployment() - rawData["elasticsearch_username"] = "my-username" - rawData["elasticsearch_password"] = "my-password" - rawData["apm_secret_token"] = "some-secret-token" - - wantDeploymentRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: rawData, - Schema: newSchema(), - }) - - type args struct { - d *schema.ResourceData - resources []*models.DeploymentResource - } - tests := []struct { - 
name string - args args - want *schema.ResourceData - err error - }{ - { - name: "Parses credentials", - args: args{ - d: deploymentRD, - resources: []*models.DeploymentResource{{ - Credentials: &models.ClusterCredentials{ - Username: ec.String("my-username"), - Password: ec.String("my-password"), - }, - SecretToken: "some-secret-token", - }}, - }, - want: wantDeploymentRD, - }, - { - name: "when no credentials are passed, it doesn't overwrite them", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: rawData, - Schema: newSchema(), - }), - resources: []*models.DeploymentResource{ - {}, - }, - }, - want: wantDeploymentRD, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := parseCredentials(tt.args.d, tt.args.resources) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - - assert.Equal(t, tt.want.State().Attributes, tt.args.d.State().Attributes) - }) - } -} - -func Test_hasRunningResources(t *testing.T) { - type args struct { - res *models.DeploymentGetResponse - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "has all the resources stopped", - args: args{res: &models.DeploymentGetResponse{Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - {Info: &models.ElasticsearchClusterInfo{Status: ec.String("stopped")}}, - }, - Kibana: []*models.KibanaResourceInfo{ - {Info: &models.KibanaClusterInfo{Status: ec.String("stopped")}}, - }, - Apm: []*models.ApmResourceInfo{ - {Info: &models.ApmInfo{Status: ec.String("stopped")}}, - }, - EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ - {Info: &models.EnterpriseSearchInfo{Status: ec.String("stopped")}}, - }, - }}}, - want: false, - }, - { - name: "has some resources stopped", - args: args{res: &models.DeploymentGetResponse{Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ 
- {Info: &models.ElasticsearchClusterInfo{Status: ec.String("running")}}, - }, - Kibana: []*models.KibanaResourceInfo{ - {Info: &models.KibanaClusterInfo{Status: ec.String("stopped")}}, - }, - Apm: []*models.ApmResourceInfo{ - {Info: &models.ApmInfo{Status: ec.String("running")}}, - }, - EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ - {Info: &models.EnterpriseSearchInfo{Status: ec.String("running")}}, - }, - }}}, - want: true, - }, - { - name: "has all resources running", - args: args{res: &models.DeploymentGetResponse{Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - {Info: &models.ElasticsearchClusterInfo{Status: ec.String("running")}}, - }, - Kibana: []*models.KibanaResourceInfo{ - {Info: &models.KibanaClusterInfo{Status: ec.String("running")}}, - }, - Apm: []*models.ApmResourceInfo{ - {Info: &models.ApmInfo{Status: ec.String("running")}}, - }, - EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ - {Info: &models.EnterpriseSearchInfo{Status: ec.String("running")}}, - }, - }}}, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := hasRunningResources(tt.args.res); got != tt.want { - t.Errorf("hasRunningResources() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/ec/ecresource/deploymentresource/import.go b/ec/ecresource/deploymentresource/import.go deleted file mode 100644 index a12a7762e..000000000 --- a/ec/ecresource/deploymentresource/import.go +++ /dev/null @@ -1,76 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "context" - "errors" - "fmt" - - "github.com/blang/semver/v4" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deputil" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// Setting this variable here so that it is parsed at compile time in case -// any errors are thrown, they are at compile time not when the user runs it. -var ilmVersion = semver.MustParse("6.6.0") - -// imports a deployment limitting the allowed version to 6.6.0 or higher. -// TODO: It might be desired to provide the ability to import a deployment -// specifying key:value pairs of secrets to populate as part of the -// import with an implementation of schema.StateContextFunc. 
-func importFunc(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { - client := m.(*api.API) - res, err := deploymentapi.Get(deploymentapi.GetParams{ - API: client, - DeploymentID: d.Id(), - QueryParams: deputil.QueryParams{ - ShowPlans: true, - }, - }) - if err != nil { - return nil, err - } - - if len(res.Resources.Elasticsearch) == 0 { - return nil, errors.New( - "invalid deployment: deployment has no elasticsearch resources", - ) - } - - v, err := semver.New( - res.Resources.Elasticsearch[0].Info.PlanInfo.Current.Plan.Elasticsearch.Version, - ) - if err != nil { - return nil, fmt.Errorf("unable to parse deployment version: %w", err) - } - - if v.LT(ilmVersion) { - return nil, fmt.Errorf( - `invalid deployment version "%s": minimum supported version is "%s"`, - v.String(), ilmVersion.String(), - ) - } - - return []*schema.ResourceData{d}, nil -} diff --git a/ec/ecresource/deploymentresource/import_test.go b/ec/ecresource/deploymentresource/import_test.go deleted file mode 100644 index 6766e7ebb..000000000 --- a/ec/ecresource/deploymentresource/import_test.go +++ /dev/null @@ -1,241 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "context" - "errors" - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_importFunc(t *testing.T) { - deploymentWithImportableVersion := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-cross-cluster-search-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - }, - }) - deploymentWithNonImportableVersion := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-cross-cluster-search-v2", - "region": "us-east-1", - "version": "5.6.1", - "elasticsearch": []interface{}{map[string]interface{}{}}, - }, - }) - deploymentWithNonImportableVersionSix := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-cross-cluster-search-v2", - "region": "us-east-1", - "version": "6.5.1", - "elasticsearch": []interface{}{map[string]interface{}{}}, - }, - }) - type args struct { - ctx context.Context - d *schema.ResourceData - m interface{} - } - tests := []struct { - name string - args args - want map[string]string - err error - }{ - { - name: "succeeds with an importable version", - args: args{ - d: deploymentWithImportableVersion, - m: api.NewMock(mock.New200Response(mock.NewStructBody(models.DeploymentGetResponse{ - Resources: &models.DeploymentResources{Elasticsearch: 
[]*models.ElasticsearchResourceInfo{ - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.9.2", - }, - }, - }, - }, - }, - }, - }}, - }))), - }, - want: map[string]string{ - "id": "320b7b540dfc967a7a649c18e2fce4ed", - - "name": "my_deployment_name", - "region": "us-east-1", - "version": "7.9.2", - "deployment_template_id": "aws-cross-cluster-search-v2", - - "elasticsearch.#": "1", - "elasticsearch.0.autoscale": "", - "elasticsearch.0.cloud_id": "", - "elasticsearch.0.snapshot_source.#": "0", - "elasticsearch.0.config.#": "0", - "elasticsearch.0.extension.#": "0", - "elasticsearch.0.http_endpoint": "", - "elasticsearch.0.https_endpoint": "", - "elasticsearch.0.ref_id": "main-elasticsearch", - "elasticsearch.0.region": "", - "elasticsearch.0.remote_cluster.#": "0", - "elasticsearch.0.resource_id": "", - "elasticsearch.0.topology.#": "0", - "elasticsearch.0.trust_account.#": "0", - "elasticsearch.0.trust_external.#": "0", - "elasticsearch.0.strategy.#": "0", - }, - }, - { - name: "fails with a non importable version (5.6.1)", - args: args{ - d: deploymentWithNonImportableVersion, - m: api.NewMock(mock.New200Response(mock.NewStructBody(models.DeploymentGetResponse{ - Resources: &models.DeploymentResources{Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "5.6.1", - }, - }, - }, - }, - }, - }, - }}, - }))), - }, - err: errors.New(`invalid deployment version "5.6.1": minimum supported version is "6.6.0"`), - want: map[string]string{ - "id": "320b7b540dfc967a7a649c18e2fce4ed", - - "name": "my_deployment_name", - "region": 
"us-east-1", - "version": "5.6.1", - "deployment_template_id": "aws-cross-cluster-search-v2", - - "elasticsearch.#": "1", - "elasticsearch.0.autoscale": "", - "elasticsearch.0.cloud_id": "", - "elasticsearch.0.snapshot_source.#": "0", - "elasticsearch.0.config.#": "0", - "elasticsearch.0.extension.#": "0", - "elasticsearch.0.http_endpoint": "", - "elasticsearch.0.https_endpoint": "", - "elasticsearch.0.ref_id": "main-elasticsearch", - "elasticsearch.0.region": "", - "elasticsearch.0.remote_cluster.#": "0", - "elasticsearch.0.resource_id": "", - "elasticsearch.0.topology.#": "0", - "elasticsearch.0.trust_account.#": "0", - "elasticsearch.0.trust_external.#": "0", - "elasticsearch.0.strategy.#": "0", - }, - }, - { - name: "fails with a non importable version (6.5.1)", - args: args{ - d: deploymentWithNonImportableVersionSix, - m: api.NewMock(mock.New200Response(mock.NewStructBody(models.DeploymentGetResponse{ - Resources: &models.DeploymentResources{Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "6.5.1", - }, - }, - }, - }, - }, - }, - }}, - }))), - }, - err: errors.New(`invalid deployment version "6.5.1": minimum supported version is "6.6.0"`), - want: map[string]string{ - "id": "320b7b540dfc967a7a649c18e2fce4ed", - - "name": "my_deployment_name", - "region": "us-east-1", - "version": "6.5.1", - "deployment_template_id": "aws-cross-cluster-search-v2", - - "elasticsearch.#": "1", - "elasticsearch.0.autoscale": "", - "elasticsearch.0.cloud_id": "", - "elasticsearch.0.snapshot_source.#": "0", - "elasticsearch.0.config.#": "0", - "elasticsearch.0.extension.#": "0", - "elasticsearch.0.http_endpoint": "", - "elasticsearch.0.https_endpoint": "", - "elasticsearch.0.ref_id": "main-elasticsearch", - 
"elasticsearch.0.region": "", - "elasticsearch.0.remote_cluster.#": "0", - "elasticsearch.0.resource_id": "", - "elasticsearch.0.topology.#": "0", - "elasticsearch.0.trust_account.#": "0", - "elasticsearch.0.trust_external.#": "0", - "elasticsearch.0.strategy.#": "0", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := importFunc(tt.args.ctx, tt.args.d, tt.args.m) - if tt.err != nil { - if !assert.EqualError(t, err, tt.err.Error()) { - t.Error(err) - } - } else { - assert.NoError(t, err) - } - - assert.Equal(t, tt.want, tt.args.d.State().Attributes) - }) - } -} diff --git a/ec/ecresource/deploymentresource/integrations_server_expanders.go b/ec/ecresource/deploymentresource/integrations_server_expanders.go deleted file mode 100644 index fd07ff8f9..000000000 --- a/ec/ecresource/deploymentresource/integrations_server_expanders.go +++ /dev/null @@ -1,207 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// expandIntegrationsServerResources expands IntegrationsServer resources into their models. -func expandIntegrationsServerResources(IntegrationsServers []interface{}, tpl *models.IntegrationsServerPayload) ([]*models.IntegrationsServerPayload, error) { - if len(IntegrationsServers) == 0 { - return nil, nil - } - - if tpl == nil { - return nil, errors.New("IntegrationsServer specified but deployment template is not configured for it. Use a different template if you wish to add IntegrationsServer") - } - - result := make([]*models.IntegrationsServerPayload, 0, len(IntegrationsServers)) - for _, raw := range IntegrationsServers { - resResource, err := expandIntegrationsServerResource(raw, tpl) - if err != nil { - return nil, err - } - result = append(result, resResource) - } - - return result, nil -} - -func expandIntegrationsServerResource(raw interface{}, res *models.IntegrationsServerPayload) (*models.IntegrationsServerPayload, error) { - var IntegrationsServer = raw.(map[string]interface{}) - - if esRefID, ok := IntegrationsServer["elasticsearch_cluster_ref_id"]; ok { - res.ElasticsearchClusterRefID = ec.String(esRefID.(string)) - } - - if refID, ok := IntegrationsServer["ref_id"]; ok { - res.RefID = ec.String(refID.(string)) - } - - if region, ok := IntegrationsServer["region"]; ok { - if r := region.(string); r != "" { - res.Region = ec.String(r) - } - } - - if cfg, ok := IntegrationsServer["config"]; ok { - if err := expandIntegrationsServerConfig(cfg, res.Plan.IntegrationsServer); err != nil { - return nil, err - } - } - - if rt, ok := IntegrationsServer["topology"]; ok && len(rt.([]interface{})) > 0 { - topology, err := expandIntegrationsServerTopology(rt, res.Plan.ClusterTopology) - if err != nil { - return nil, err 
- } - res.Plan.ClusterTopology = topology - } else { - res.Plan.ClusterTopology = defaultIntegrationsServerTopology(res.Plan.ClusterTopology) - } - - return res, nil -} - -func expandIntegrationsServerTopology(raw interface{}, topologies []*models.IntegrationsServerTopologyElement) ([]*models.IntegrationsServerTopologyElement, error) { - rawTopologies := raw.([]interface{}) - res := make([]*models.IntegrationsServerTopologyElement, 0, len(rawTopologies)) - - for i, rawTop := range rawTopologies { - topology := rawTop.(map[string]interface{}) - var icID string - if id, ok := topology["instance_configuration_id"]; ok { - icID = id.(string) - } - // When a topology element is set but no instance_configuration_id - // is set, then obtain the instance_configuration_id from the topology - // element. - if t := defaultIntegrationsServerTopology(topologies); icID == "" && len(t) >= i { - icID = t[i].InstanceConfigurationID - } - - size, err := util.ParseTopologySize(topology) - if err != nil { - return nil, err - } - - elem, err := matchIntegrationsServerTopology(icID, topologies) - if err != nil { - return nil, err - } - if size != nil { - elem.Size = size - } - - if zones, ok := topology["zone_count"]; ok { - if z := zones.(int); z > 0 { - elem.ZoneCount = int32(z) - } - - } - - res = append(res, elem) - } - - return res, nil -} - -func expandIntegrationsServerConfig(raw interface{}, res *models.IntegrationsServerConfiguration) error { - for _, rawCfg := range raw.([]interface{}) { - var cfg = rawCfg.(map[string]interface{}) - - if debugEnabled, ok := cfg["debug_enabled"]; ok { - if res.SystemSettings == nil { - res.SystemSettings = &models.IntegrationsServerSystemSettings{} - } - res.SystemSettings.DebugEnabled = ec.Bool(debugEnabled.(bool)) - } - - if settings, ok := cfg["user_settings_json"]; ok && settings != nil { - if s, ok := settings.(string); ok && s != "" { - if err := json.Unmarshal([]byte(s), &res.UserSettingsJSON); err != nil { - return fmt.Errorf("failed 
expanding IntegrationsServer user_settings_json: %w", err) - } - } - } - if settings, ok := cfg["user_settings_override_json"]; ok && settings != nil { - if s, ok := settings.(string); ok && s != "" { - if err := json.Unmarshal([]byte(s), &res.UserSettingsOverrideJSON); err != nil { - return fmt.Errorf("failed expanding IntegrationsServer user_settings_override_json: %w", err) - } - } - } - if settings, ok := cfg["user_settings_yaml"]; ok { - res.UserSettingsYaml = settings.(string) - } - if settings, ok := cfg["user_settings_override_yaml"]; ok { - res.UserSettingsOverrideYaml = settings.(string) - } - - if v, ok := cfg["docker_image"]; ok { - res.DockerImage = v.(string) - } - } - - return nil -} - -// defaultIntegrationsServerTopology iterates over all the templated topology elements and -// sets the size to the default when the template size is smaller than the -// deployment template default, the same is done on the ZoneCount. -func defaultIntegrationsServerTopology(topology []*models.IntegrationsServerTopologyElement) []*models.IntegrationsServerTopologyElement { - for _, t := range topology { - if *t.Size.Value < minimumIntegrationsServerSize { - t.Size.Value = ec.Int32(minimumIntegrationsServerSize) - } - if t.ZoneCount < minimumZoneCount { - t.ZoneCount = minimumZoneCount - } - } - - return topology -} - -func matchIntegrationsServerTopology(id string, topologies []*models.IntegrationsServerTopologyElement) (*models.IntegrationsServerTopologyElement, error) { - for _, t := range topologies { - if t.InstanceConfigurationID == id { - return t, nil - } - } - return nil, fmt.Errorf( - `IntegrationsServer topology: invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, - id, - ) -} - -// IntegrationsServerResource returns the IntegrationsServerPayload from a deployment -// template or an empty version of the payload. 
-func integrationsServerResource(res *models.DeploymentTemplateInfoV2) *models.IntegrationsServerPayload { - if len(res.DeploymentTemplate.Resources.IntegrationsServer) == 0 { - return nil - } - return res.DeploymentTemplate.Resources.IntegrationsServer[0] -} diff --git a/ec/ecresource/deploymentresource/integrations_server_expanders_test.go b/ec/ecresource/deploymentresource/integrations_server_expanders_test.go deleted file mode 100644 index 76d2b84b2..000000000 --- a/ec/ecresource/deploymentresource/integrations_server_expanders_test.go +++ /dev/null @@ -1,264 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" -) - -func Test_expandIntegrationsServerResources(t *testing.T) { - tplPath := "testdata/template-ece-3.0.0-default.json" - tpl := func() *models.IntegrationsServerPayload { - return integrationsServerResource(parseDeploymentTemplate(t, - tplPath, - )) - } - type args struct { - ess []interface{} - tpl *models.IntegrationsServerPayload - } - tests := []struct { - name string - args args - want []*models.IntegrationsServerPayload - err error - }{ - { - name: "returns nil when there's no resources", - }, - { - name: "parses an Integrations Server resource with explicit topology", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-integrations_server", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "integrations.server", - "size": "2g", - "size_resource": "memory", - "zone_count": 1, - }}, - }, - }, - }, - want: []*models.IntegrationsServerPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-integrations_server"), - Plan: &models.IntegrationsServerPlan{ - IntegrationsServer: &models.IntegrationsServerConfiguration{}, - ClusterTopology: []*models.IntegrationsServerTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "integrations.server", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }}, - }, - }, - }, - }, - { - name: "parses an Integrations Server resource with invalid instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": 
"main-integrations_server", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "so invalid", - "size": "2g", - "size_resource": "memory", - "zone_count": 1, - }}, - }, - }, - }, - err: errors.New(`IntegrationsServer topology: invalid instance_configuration_id: "so invalid" doesn't match any of the deployment template instance configurations`), - }, - { - name: "parses an Integrations Server resource with no topology", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-integrations_server", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - }, - }, - }, - want: []*models.IntegrationsServerPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-integrations_server"), - Plan: &models.IntegrationsServerPlan{ - IntegrationsServer: &models.IntegrationsServerConfiguration{}, - ClusterTopology: []*models.IntegrationsServerTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "integrations.server", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }}, - }, - }, - }, - }, - { - name: "parses an Integrations Server resource with a topology element but no instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-integrations_server", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "size": "2g", - "size_resource": "memory", - }}, - }, - }, - }, - want: []*models.IntegrationsServerPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-integrations_server"), - 
Plan: &models.IntegrationsServerPlan{ - IntegrationsServer: &models.IntegrationsServerConfiguration{}, - ClusterTopology: []*models.IntegrationsServerTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "integrations.server", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }}, - }, - }, - }, - }, - { - name: "parses an Integrations Server resource with explicit topology and some config", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "tertiary-integrations_server", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\": \"value\"}", - "user_settings_override_json": "{\"some.setting\": \"value2\"}", - "debug_enabled": true, - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "integrations.server", - "size": "4g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - want: []*models.IntegrationsServerPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("tertiary-integrations_server"), - Plan: &models.IntegrationsServerPlan{ - IntegrationsServer: &models.IntegrationsServerConfiguration{ - UserSettingsYaml: `some.setting: value`, - UserSettingsOverrideYaml: `some.setting: value2`, - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "value2", - }, - SystemSettings: &models.IntegrationsServerSystemSettings{ - DebugEnabled: ec.Bool(true), - }, - }, - ClusterTopology: []*models.IntegrationsServerTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "integrations.server", - Size: &models.TopologySize{ - Resource: 
ec.String("memory"), - Value: ec.Int32(4096), - }, - }}, - }, - }}, - }, - { - name: "tries to parse an integrations_server resource when the template doesn't have an Integrations Server instance set.", - args: args{ - tpl: nil, - ess: []interface{}{map[string]interface{}{ - "ref_id": "tertiary-integrations_server", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "integrations.server", - "size": "4g", - "size_resource": "memory", - "zone_count": 1, - }}, - "config": []interface{}{map[string]interface{}{ - "debug_enabled": true, - }}, - }}, - }, - err: errors.New("IntegrationsServer specified but deployment template is not configured for it. Use a different template if you wish to add IntegrationsServer"), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := expandIntegrationsServerResources(tt.args.ess, tt.args.tpl) - if !assert.Equal(t, tt.err, err) { - t.Error(err) - } - - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/ecresource/deploymentresource/integrations_server_flatteners.go b/ec/ecresource/deploymentresource/integrations_server_flatteners.go deleted file mode 100644 index 7dd5e5261..000000000 --- a/ec/ecresource/deploymentresource/integrations_server_flatteners.go +++ /dev/null @@ -1,154 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "bytes" - "encoding/json" - - "github.com/elastic/cloud-sdk-go/pkg/models" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// flattenIntegrationsServerResources flattens IntegrationsServer resources into its flattened structure. -func flattenIntegrationsServerResources(in []*models.IntegrationsServerResourceInfo, name string) []interface{} { - var result = make([]interface{}, 0, len(in)) - for _, res := range in { - var m = make(map[string]interface{}) - if util.IsCurrentIntegrationsServerPlanEmpty(res) || isIntegrationsServerResourceStopped(res) { - continue - } - - if res.RefID != nil && *res.RefID != "" { - m["ref_id"] = *res.RefID - } - - if res.Info.ID != nil && *res.Info.ID != "" { - m["resource_id"] = *res.Info.ID - } - - if res.Region != nil { - m["region"] = *res.Region - } - - plan := res.Info.PlanInfo.Current.Plan - if topology := flattenIntegrationsServerTopology(plan); len(topology) > 0 { - m["topology"] = topology - } - - if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID - } - - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v - } - - if cfg := flattenIntegrationsServerConfig(plan.IntegrationsServer); len(cfg) > 0 { - m["config"] = cfg - } - - result = append(result, m) - } - - return result -} - -func flattenIntegrationsServerTopology(plan *models.IntegrationsServerPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) - for _, topology := range 
plan.ClusterTopology { - var m = make(map[string]interface{}) - if topology.Size == nil || topology.Size.Value == nil || *topology.Size.Value == 0 { - continue - } - - if topology.InstanceConfigurationID != "" { - m["instance_configuration_id"] = topology.InstanceConfigurationID - } - - if topology.Size != nil { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource - } - - m["zone_count"] = topology.ZoneCount - - result = append(result, m) - } - - return result -} - -func flattenIntegrationsServerConfig(cfg *models.IntegrationsServerConfiguration) []interface{} { - var m = make(map[string]interface{}) - if cfg == nil { - return nil - } - - if cfg.UserSettingsYaml != "" { - m["user_settings_yaml"] = cfg.UserSettingsYaml - } - - if cfg.UserSettingsOverrideYaml != "" { - m["user_settings_override_yaml"] = cfg.UserSettingsOverrideYaml - } - - if o := cfg.UserSettingsJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_json"] = string(b) - } - } - - if o := cfg.UserSettingsOverrideJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_override_json"] = string(b) - } - } - - if cfg.DockerImage != "" { - m["docker_image"] = cfg.DockerImage - } - - for k, v := range flattenIntegrationsServerSystemConfig(cfg.SystemSettings) { - m[k] = v - } - - if len(m) == 0 { - return nil - } - - return []interface{}{m} -} - -func flattenIntegrationsServerSystemConfig(cfg *models.IntegrationsServerSystemSettings) map[string]interface{} { - var m = make(map[string]interface{}) - if cfg == nil { - return nil - } - - if cfg.DebugEnabled != nil { - m["debug_enabled"] = *cfg.DebugEnabled - } - - if len(m) == 0 { - return nil - } - - return m -} diff --git a/ec/ecresource/deploymentresource/integrationsserver/v1/integrations_server.go b/ec/ecresource/deploymentresource/integrationsserver/v1/integrations_server.go new file mode 100644 
index 000000000..f0a5b62cc --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v1/integrations_server.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type IntegrationsServerTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + Topology types.List `tfsdk:"topology"` + Config types.List `tfsdk:"config"` +} + +type IntegrationsServer struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + Topology topologyv1.Topologies `tfsdk:"topology"` + Config IntegrationsServerConfigs `tfsdk:"config"` +} + +type 
IntegrationsServers []IntegrationsServer diff --git a/ec/ecresource/deploymentresource/integrationsserver/v1/integrations_server_config.go b/ec/ecresource/deploymentresource/integrationsserver/v1/integrations_server_config.go new file mode 100644 index 000000000..821f53fe0 --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v1/integrations_server_config.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type IntegrationsServerConfigTF struct { + DockerImage types.String `tfsdk:"docker_image"` + DebugEnabled types.Bool `tfsdk:"debug_enabled"` + UserSettingsJson types.String `tfsdk:"user_settings_json"` + UserSettingsOverrideJson types.String `tfsdk:"user_settings_override_json"` + UserSettingsYaml types.String `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml types.String `tfsdk:"user_settings_override_yaml"` +} + +type IntegrationsServerConfig struct { + DockerImage *string `tfsdk:"docker_image"` + DebugEnabled *bool `tfsdk:"debug_enabled"` + UserSettingsJson *string `tfsdk:"user_settings_json"` + UserSettingsOverrideJson *string `tfsdk:"user_settings_override_json"` + UserSettingsYaml *string `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml *string `tfsdk:"user_settings_override_yaml"` +} + +type IntegrationsServerConfigs []IntegrationsServerConfig diff --git a/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go b/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go new file mode 100644 index 000000000..66236e811 --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go @@ -0,0 +1,171 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func IntegrationsServerSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Integrations Server resource definition", + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + resource.UseStateForUnknown(), + }, + }, + "ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-integrations_server"}), + resource.UseStateForUnknown(), + }, + }, + "resource_id": { + Type: types.StringType, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "region": { + Type: types.StringType, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "http_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "https_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "topology": { + Description: "Optional topology attribute", + Optional: true, + Computed: true, + PlanModifiers: 
[]tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + resource.UseStateForUnknown(), + }, + }, + "zone_count": { + Type: types.Int64Type, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + }), + }, + "config": { + Description: `Optionally define the IntegrationsServer configuration options for the IntegrationsServer Server`, + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + // TODO + // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the IntegrationsServer nodes will use. 
Note that this field will only work for internal users.", + Optional: true, + }, + // IntegrationsServer System Settings + "debug_enabled": { + Type: types.BoolType, + Description: `Optionally enable debug mode for IntegrationsServer servers - defaults to false`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: false}), + resource.UseStateForUnknown(), + }, + }, + "user_settings_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + }), + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server.go new file mode 100644 index 000000000..56f291e25 --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server.go @@ -0,0 +1,190 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/models" + topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type IntegrationsServerTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + Config types.Object `tfsdk:"config"` +} + +type IntegrationsServer struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + Config *IntegrationsServerConfig `tfsdk:"config"` +} + +func ReadIntegrationsServers(in []*models.IntegrationsServerResourceInfo) (*IntegrationsServer, error) { + for _, model := range in { + if util.IsCurrentIntegrationsServerPlanEmpty(model) || utils.IsIntegrationsServerResourceStopped(model) { + continue + } + + 
srv, err := readIntegrationsServer(model) + if err != nil { + return nil, err + } + + return srv, nil + } + + return nil, nil +} + +func readIntegrationsServer(in *models.IntegrationsServerResourceInfo) (*IntegrationsServer, error) { + + var srv IntegrationsServer + + srv.RefId = in.RefID + + srv.ResourceId = in.Info.ID + + srv.Region = in.Region + + plan := in.Info.PlanInfo.Current.Plan + + topologies, err := readIntegrationsServerTopologies(plan.ClusterTopology) + + if err != nil { + return nil, err + } + + if len(topologies) > 0 { + srv.InstanceConfigurationId = topologies[0].InstanceConfigurationId + srv.Size = topologies[0].Size + srv.SizeResource = topologies[0].SizeResource + srv.ZoneCount = topologies[0].ZoneCount + } + + srv.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID + + srv.HttpEndpoint, srv.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) + + cfg, err := readIntegrationsServerConfigs(plan.IntegrationsServer) + + if err != nil { + return nil, err + } + + srv.Config = cfg + + return &srv, nil +} + +func (srv IntegrationsServerTF) Payload(ctx context.Context, payload models.IntegrationsServerPayload) (*models.IntegrationsServerPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + if !srv.ElasticsearchClusterRefId.IsNull() { + payload.ElasticsearchClusterRefID = &srv.ElasticsearchClusterRefId.Value + } + + if !srv.RefId.IsNull() { + payload.RefID = &srv.RefId.Value + } + + if srv.Region.Value != "" { + payload.Region = &srv.Region.Value + } + + ds := integrationsServerConfigPayload(ctx, srv.Config, payload.Plan.IntegrationsServer) + diags.Append(ds...) + + topologyTF := topologyv1.TopologyTF{ + InstanceConfigurationId: srv.InstanceConfigurationId, + Size: srv.Size, + SizeResource: srv.SizeResource, + ZoneCount: srv.ZoneCount, + } + + toplogyPayload, ds := integrationsServerTopologyPayload(ctx, topologyTF, defaultIntegrationsServerTopology(payload.Plan.ClusterTopology), 0) + + diags.Append(ds...) 
+ + if !ds.HasError() && toplogyPayload != nil { + payload.Plan.ClusterTopology = []*models.IntegrationsServerTopologyElement{toplogyPayload} + } + + return &payload, diags +} + +func IntegrationsServerPayload(ctx context.Context, srvObj types.Object, template *models.DeploymentTemplateInfoV2) (*models.IntegrationsServerPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + var srv *IntegrationsServerTF + + if diags = tfsdk.ValueAs(ctx, srvObj, &srv); diags.HasError() { + return nil, diags + } + + if srv == nil { + return nil, nil + } + + templatePayload := integrationsServerResource(template) + + if templatePayload == nil { + diags.AddError("integrations_server payload error", "integrations_server specified but deployment template is not configured for it. Use a different template if you wish to add integrations_server") + return nil, diags + } + + payload, diags := srv.Payload(ctx, *templatePayload) + + if diags.HasError() { + return nil, diags + } + + return payload, nil +} + +// integrationsServerResource returns the IntegrationsServerPayload from a deployment +// template or an empty version of the payload. +func integrationsServerResource(template *models.DeploymentTemplateInfoV2) *models.IntegrationsServerPayload { + if template == nil || len(template.DeploymentTemplate.Resources.IntegrationsServer) == 0 { + return nil + } + return template.DeploymentTemplate.Resources.IntegrationsServer[0] +} diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_config.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_config.go new file mode 100644 index 000000000..636ee9fd1 --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_config.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. 
Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v1" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +type IntegrationsServerConfig v1.IntegrationsServerConfig + +func readIntegrationsServerConfigs(in *models.IntegrationsServerConfiguration) (*IntegrationsServerConfig, error) { + var cfg IntegrationsServerConfig + + if in.UserSettingsYaml != "" { + cfg.UserSettingsYaml = &in.UserSettingsYaml + } + + if in.UserSettingsOverrideYaml != "" { + cfg.UserSettingsOverrideYaml = &in.UserSettingsOverrideYaml + } + + if o := in.UserSettingsJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsJson = ec.String(string(b)) + } + } + + if o := in.UserSettingsOverrideJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsOverrideJson = ec.String(string(b)) + } + } + + if in.DockerImage != "" { + cfg.DockerImage = &in.DockerImage + } + + if in.SystemSettings != nil { + if in.SystemSettings.DebugEnabled != nil { + cfg.DebugEnabled = 
in.SystemSettings.DebugEnabled + } + } + + if cfg == (IntegrationsServerConfig{}) { + return nil, nil + } + + return &cfg, nil +} + +func integrationsServerConfigPayload(ctx context.Context, cfgObj attr.Value, res *models.IntegrationsServerConfiguration) diag.Diagnostics { + var diags diag.Diagnostics + + if cfgObj.IsNull() || cfgObj.IsUnknown() { + return nil + } + + var cfg *v1.IntegrationsServerConfigTF + + if diags = tfsdk.ValueAs(ctx, cfgObj, &cfg); diags.HasError() { + return nil + } + + if cfg == nil { + return nil + } + + if !cfg.DebugEnabled.IsNull() { + if res.SystemSettings == nil { + res.SystemSettings = &models.IntegrationsServerSystemSettings{} + } + res.SystemSettings.DebugEnabled = &cfg.DebugEnabled.Value + } + + if cfg.UserSettingsJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsJson.Value), &res.UserSettingsJSON); err != nil { + diags.AddError("failed expanding IntegrationsServer user_settings_json", err.Error()) + } + } + + if cfg.UserSettingsOverrideJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsOverrideJson.Value), &res.UserSettingsOverrideJSON); err != nil { + diags.AddError("failed expanding IntegrationsServer user_settings_override_json", err.Error()) + } + } + + if !cfg.UserSettingsYaml.IsNull() { + res.UserSettingsYaml = cfg.UserSettingsYaml.Value + } + + if !cfg.UserSettingsOverrideYaml.IsNull() { + res.UserSettingsOverrideYaml = cfg.UserSettingsOverrideYaml.Value + } + + if !cfg.DockerImage.IsNull() { + res.DockerImage = cfg.DockerImage.Value + } + + return diags +} diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload_test.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload_test.go new file mode 100644 index 000000000..baa001e2d --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload_test.go @@ -0,0 +1,254 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" +) + +func Test_IntegrationsServerPayload(t *testing.T) { + tplPath := "../../testdata/template-ece-3.0.0-default.json" + tpl := func() *models.DeploymentTemplateInfoV2 { + return testutil.ParseDeploymentTemplate(t, tplPath) + } + type args struct { + srv *IntegrationsServer + tpl *models.DeploymentTemplateInfoV2 + } + tests := []struct { + name string + args args + want *models.IntegrationsServerPayload + diags diag.Diagnostics + }{ + { + name: "returns nil when there's no resources", + }, + { + name: "parses an Integrations Server resource with explicit topology", + args: args{ + tpl: tpl(), + srv: &IntegrationsServer{ + RefId: ec.String("main-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: 
ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("integrations.server"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + want: &models.IntegrationsServerPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-integrations_server"), + Plan: &models.IntegrationsServerPlan{ + IntegrationsServer: &models.IntegrationsServerConfiguration{}, + ClusterTopology: []*models.IntegrationsServerTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "integrations.server", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + }}, + }, + }, + }, + { + name: "parses an Integrations Server resource with invalid instance_configuration_id", + args: args{ + tpl: tpl(), + srv: &IntegrationsServer{ + RefId: ec.String("main-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("invalid"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("integrations_server topology payload error", `invalid instance_configuration_id: "invalid" doesn't match any of the deployment template instance configurations`) + return diags + }(), + }, + { + name: "parses an Integrations Server resource with no topology", + args: args{ + tpl: tpl(), + srv: &IntegrationsServer{ + RefId: ec.String("main-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + }, + }, + want: &models.IntegrationsServerPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-integrations_server"), + Plan: 
&models.IntegrationsServerPlan{ + IntegrationsServer: &models.IntegrationsServerConfiguration{}, + ClusterTopology: []*models.IntegrationsServerTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "integrations.server", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }}, + }, + }, + }, + { + name: "parses an Integrations Server resource with a topology element but no instance_configuration_id", + args: args{ + tpl: tpl(), + srv: &IntegrationsServer{ + RefId: ec.String("main-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + }, + }, + want: &models.IntegrationsServerPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-integrations_server"), + Plan: &models.IntegrationsServerPlan{ + IntegrationsServer: &models.IntegrationsServerConfiguration{}, + ClusterTopology: []*models.IntegrationsServerTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "integrations.server", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + }}, + }, + }, + }, + { + name: "parses an Integrations Server resource with explicit topology and some config", + args: args{ + tpl: tpl(), + srv: &IntegrationsServer{ + RefId: ec.String("tertiary-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Config: &IntegrationsServerConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\": \"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\": \"value2\"}"), + DebugEnabled: ec.Bool(true), + }, + InstanceConfigurationId: ec.String("integrations.server"), + 
Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + want: &models.IntegrationsServerPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("tertiary-integrations_server"), + Plan: &models.IntegrationsServerPlan{ + IntegrationsServer: &models.IntegrationsServerConfiguration{ + UserSettingsYaml: `some.setting: value`, + UserSettingsOverrideYaml: `some.setting: value2`, + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "value2", + }, + SystemSettings: &models.IntegrationsServerSystemSettings{ + DebugEnabled: ec.Bool(true), + }, + }, + ClusterTopology: []*models.IntegrationsServerTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "integrations.server", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + }}, + }, + }, + }, + { + name: "tries to parse an integrations_server resource when the template doesn't have an Integrations Server instance set.", + args: args{ + tpl: nil, + srv: &IntegrationsServer{ + RefId: ec.String("tertiary-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Config: &IntegrationsServerConfig{ + DebugEnabled: ec.Bool(true), + }, + InstanceConfigurationId: ec.String("integrations.server"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("integrations_server payload error", "integrations_server specified but deployment template is not configured for it. 
Use a different template if you wish to add integrations_server") + return diags + }(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var srv types.Object + diags := tfsdk.ValueFrom(context.Background(), tt.args.srv, IntegrationsServerSchema().FrameworkType(), &srv) + assert.Nil(t, diags) + + if got, diags := IntegrationsServerPayload(context.Background(), srv, tt.args.tpl); tt.diags != nil { + assert.Equal(t, tt.diags, diags) + } else { + assert.Nil(t, diags) + assert.Equal(t, tt.want, got) + } + }) + } +} diff --git a/ec/ecresource/deploymentresource/integrations_server_flatteners_test.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read_test.go similarity index 71% rename from ec/ecresource/deploymentresource/integrations_server_flatteners_test.go rename to ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read_test.go index 3791ae26d..74f75615d 100644 --- a/ec/ecresource/deploymentresource/integrations_server_flatteners_test.go +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read_test.go @@ -15,11 +15,14 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package v2 import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" "github.com/elastic/cloud-sdk-go/pkg/api/mock" @@ -27,20 +30,19 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) -func Test_flattenIntegrationsServerResource(t *testing.T) { +func Test_readIntegrationsServer(t *testing.T) { type args struct { - in []*models.IntegrationsServerResourceInfo - name string + in []*models.IntegrationsServerResourceInfo } tests := []struct { name string args args - want []interface{} + want *IntegrationsServer }{ { name: "empty resource list returns empty list", args: args{in: []*models.IntegrationsServerResourceInfo{}}, - want: []interface{}{}, + want: nil, }, { name: "empty current plan returns empty list", @@ -53,7 +55,7 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { }, }, }}, - want: []interface{}{}, + want: nil, }, { name: "parses the integrations_server resource", @@ -94,23 +96,17 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-integrations_server", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://integrations_serverresource.cloud.elastic.co:9200", - "https_endpoint": "https://integrations_serverresource.cloud.elastic.co:9243", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.integrations_server.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }, - }, - }, + want: &IntegrationsServer{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + HttpEndpoint: 
ec.String("http://integrations_serverresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://integrations_serverresource.cloud.elastic.co:9243"), + InstanceConfigurationId: ec.String("aws.integrations_server.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, }, }, { @@ -205,26 +201,24 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-integrations_server", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://integrations_serverresource.cloud.elastic.co:9200", - "https_endpoint": "https://integrations_serverresource.cloud.elastic.co:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.integrations_server.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }}, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - }}, - }}, + want: &IntegrationsServer{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://integrations_serverresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://integrations_serverresource.cloud.elastic.co:9243"), + InstanceConfigurationId: ec.String("aws.integrations_server.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + Config: &IntegrationsServerConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: 
ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + }, }, { name: "parses the integrations_server resource with config overrides and system settings", @@ -276,34 +270,36 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-integrations_server", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://integrations_serverresource.cloud.elastic.co:9200", - "https_endpoint": "https://integrations_serverresource.cloud.elastic.co:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.integrations_server.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }}, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - - "debug_enabled": true, - }}, - }}, + want: &IntegrationsServer{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://integrations_serverresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://integrations_serverresource.cloud.elastic.co:9243"), + InstanceConfigurationId: ec.String("aws.integrations_server.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + Config: &IntegrationsServerConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: 
ec.String("{\"some.setting\":\"value2\"}"), + DebugEnabled: ec.Bool(true), + }, + }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenIntegrationsServerResources(tt.args.in, tt.args.name) - assert.Equal(t, tt.want, got) + srv, err := ReadIntegrationsServers(tt.args.in) + assert.Nil(t, err) + assert.Equal(t, tt.want, srv) + + var srvTF types.Object + diags := tfsdk.ValueFrom(context.Background(), srv, IntegrationsServerSchema().FrameworkType(), &srvTF) + assert.Nil(t, diags) }) } } diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_topology.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_topology.go new file mode 100644 index 000000000..5a4a728b3 --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_topology.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "fmt" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + + topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" +) + +const ( + minimumIntegrationsServerSize = 1024 +) + +func integrationsServerTopologyPayload(ctx context.Context, topology topologyv1.TopologyTF, planModels []*models.IntegrationsServerTopologyElement, index int) (*models.IntegrationsServerTopologyElement, diag.Diagnostics) { + + icID := topology.InstanceConfigurationId.Value + + // When a topology element is set but no instance_configuration_id + // is set, then obtain the instance_configuration_id from the topology + // element. 
+ if icID == "" && index < len(planModels) { + icID = planModels[index].InstanceConfigurationID + } + + var diags diag.Diagnostics + + size, err := converters.ParseTopologySizeTF(topology.Size, topology.SizeResource) + if err != nil { + diags.AddError("parse topology error", err.Error()) + return nil, diags + } + + elem, err := matchIntegrationsServerTopology(icID, planModels) + if err != nil { + diags.AddError("integrations_server topology payload error", err.Error()) + return nil, diags + } + + if size != nil { + elem.Size = size + } + + if topology.ZoneCount.Value > 0 { + elem.ZoneCount = int32(topology.ZoneCount.Value) + } + + return elem, nil +} + +func matchIntegrationsServerTopology(id string, topologies []*models.IntegrationsServerTopologyElement) (*models.IntegrationsServerTopologyElement, error) { + for _, t := range topologies { + if t.InstanceConfigurationID == id { + return t, nil + } + } + return nil, fmt.Errorf( + `invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, + id, + ) +} + +// DefaultIntegrationsServerTopology iterates over all the templated topology elements and +// sets the size to the default when the template size is smaller than the +// deployment template default, the same is done on the ZoneCount. 
+func defaultIntegrationsServerTopology(topology []*models.IntegrationsServerTopologyElement) []*models.IntegrationsServerTopologyElement { + for _, t := range topology { + if *t.Size.Value < minimumIntegrationsServerSize { + t.Size.Value = ec.Int32(minimumIntegrationsServerSize) + } + if t.ZoneCount < utils.MinimumZoneCount { + t.ZoneCount = utils.MinimumZoneCount + } + } + + return topology +} + +func readIntegrationsServerTopologies(in []*models.IntegrationsServerTopologyElement) (topologyv1.Topologies, error) { + if len(in) == 0 { + return nil, nil + } + + tops := make(topologyv1.Topologies, 0, len(in)) + for _, model := range in { + if model.Size == nil || model.Size.Value == nil || *model.Size.Value == 0 { + continue + } + + top, err := readIntegrationsServerTopology(model) + if err != nil { + return nil, err + } + + tops = append(tops, *top) + } + + return tops, nil +} + +func readIntegrationsServerTopology(in *models.IntegrationsServerTopologyElement) (*topologyv1.Topology, error) { + var top topologyv1.Topology + + if in.InstanceConfigurationID != "" { + top.InstanceConfigurationId = &in.InstanceConfigurationID + } + + if in.Size != nil { + top.Size = ec.String(util.MemoryToState(*in.Size.Value)) + top.SizeResource = ec.String(*in.Size.Resource) + } + + top.ZoneCount = int(in.ZoneCount) + + return &top, nil +} diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go b/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go new file mode 100644 index 000000000..07a9a2df4 --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go @@ -0,0 +1,158 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func IntegrationsServerSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Integrations Server resource definition", + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + resource.UseStateForUnknown(), + }, + }, + "ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-integrations_server"}), + resource.UseStateForUnknown(), + }, + }, + "resource_id": { + Type: types.StringType, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "region": { + Type: types.StringType, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "http_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: 
tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "https_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "instance_configuration_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + resource.UseStateForUnknown(), + }, + }, + "zone_count": { + Type: types.Int64Type, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "config": { + Description: `Optionally define the IntegrationsServer configuration options for the IntegrationsServer Server`, + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + // TODO + // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the IntegrationsServer nodes will use. 
Note that this field will only work for internal users only.", + Optional: true, + }, + // IntegrationsServer System Settings + "debug_enabled": { + Type: types.BoolType, + Description: `Optionally enable debug mode for IntegrationsServer servers - defaults to false`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: false}), + resource.UseStateForUnknown(), + }, + }, + "user_settings_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + }), + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/kibana/v1/kibana.go b/ec/ecresource/deploymentresource/kibana/v1/kibana.go new file mode 100644 index 000000000..5b9314e98 --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v1/kibana.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v1 + +import ( + topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type KibanaTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + Topology types.List `tfsdk:"topology"` + Config types.List `tfsdk:"config"` +} + +type Kibana struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + Topology topologyv1.Topologies `tfsdk:"topology"` + Config KibanaConfigs `tfsdk:"config"` +} + +type Kibanas []Kibana + +type KibanaTopologiesTF []*topologyv1.TopologyTF diff --git a/ec/ecresource/deploymentresource/kibana/v1/kibana_config.go b/ec/ecresource/deploymentresource/kibana/v1/kibana_config.go new file mode 100644 index 000000000..41ccb33fc --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v1/kibana_config.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type KibanaConfigTF struct { + DockerImage types.String `tfsdk:"docker_image"` + UserSettingsJson types.String `tfsdk:"user_settings_json"` + UserSettingsOverrideJson types.String `tfsdk:"user_settings_override_json"` + UserSettingsYaml types.String `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml types.String `tfsdk:"user_settings_override_yaml"` +} + +type KibanaConfig struct { + DockerImage *string `tfsdk:"docker_image"` + UserSettingsJson *string `tfsdk:"user_settings_json"` + UserSettingsOverrideJson *string `tfsdk:"user_settings_override_json"` + UserSettingsYaml *string `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml *string `tfsdk:"user_settings_override_yaml"` +} + +type KibanaConfigs []KibanaConfig diff --git a/ec/ecresource/deploymentresource/kibana/v1/schema.go b/ec/ecresource/deploymentresource/kibana/v1/schema.go new file mode 100644 index 000000000..65b5c8d62 --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v1/schema.go @@ -0,0 +1,160 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func KibanaSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Kibana resource definition", + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + resource.UseStateForUnknown(), + }, + Computed: true, + Optional: true, + }, + "ref_id": { + Type: types.StringType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-kibana"}), + resource.UseStateForUnknown(), + }, + Computed: true, + Optional: true, + }, + "resource_id": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "region": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "http_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + 
resource.UseStateForUnknown(), + }, + }, + "https_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "topology": { + Description: `Optional topology element`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + resource.UseStateForUnknown(), + }, + Computed: true, + Optional: true, + }, + "zone_count": { + Type: types.Int64Type, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + }), + }, + "config": { + Optional: true, + // TODO + // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + Description: `Optionally define the Kibana configuration options for the Kibana Server`, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the Kibana nodes will use. 
Note that this field will only work for internal users only.", + Optional: true, + }, + "user_settings_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + }), + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana.go b/ec/ecresource/deploymentresource/kibana/v2/kibana.go new file mode 100644 index 000000000..0b9d02f5b --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana.go @@ -0,0 +1,198 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/models" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v1" + topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type KibanaTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + Config types.Object `tfsdk:"config"` +} + +type Kibana struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + Config *KibanaConfig `tfsdk:"config"` +} + +func ReadKibanas(in []*models.KibanaResourceInfo) (*Kibana, error) { + for _, model := range in { + if util.IsCurrentKibanaPlanEmpty(model) || utils.IsKibanaResourceStopped(model) { + continue + } + + kibana, 
err := ReadKibana(model) + if err != nil { + return nil, err + } + + return kibana, nil + } + + return nil, nil +} + +func ReadKibana(in *models.KibanaResourceInfo) (*Kibana, error) { + var kibana Kibana + + kibana.RefId = in.RefID + + kibana.ResourceId = in.Info.ClusterID + + kibana.Region = in.Region + + plan := in.Info.PlanInfo.Current.Plan + var err error + + topologies, err := readKibanaTopologies(plan.ClusterTopology) + if err != nil { + return nil, err + } + + if len(topologies) > 0 { + kibana.InstanceConfigurationId = topologies[0].InstanceConfigurationId + kibana.Size = topologies[0].Size + kibana.SizeResource = topologies[0].SizeResource + kibana.ZoneCount = topologies[0].ZoneCount + } + + kibana.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID + + kibana.HttpEndpoint, kibana.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) + + config, err := readKibanaConfig(plan.Kibana) + if err != nil { + return nil, err + } + + kibana.Config = config + + return &kibana, nil +} + +func (kibana KibanaTF) Payload(ctx context.Context, payload models.KibanaPayload) (*models.KibanaPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + if !kibana.ElasticsearchClusterRefId.IsNull() { + payload.ElasticsearchClusterRefID = &kibana.ElasticsearchClusterRefId.Value + } + + if !kibana.RefId.IsNull() { + payload.RefID = &kibana.RefId.Value + } + + if kibana.Region.Value != "" { + payload.Region = &kibana.Region.Value + } + + if !kibana.Config.IsNull() && !kibana.Config.IsUnknown() { + var config *v1.KibanaConfigTF + + ds := tfsdk.ValueAs(ctx, kibana.Config, &config) + + diags.Append(ds...) + + if !ds.HasError() { + diags.Append(kibanaConfigPayload(config, payload.Plan.Kibana)...) 
+ } + } + + topologyTF := topologyv1.TopologyTF{ + InstanceConfigurationId: kibana.InstanceConfigurationId, + Size: kibana.Size, + SizeResource: kibana.SizeResource, + ZoneCount: kibana.ZoneCount, + } + + topologyPayload, ds := kibanaTopologyPayload(ctx, topologyTF, defaultKibanaTopology(payload.Plan.ClusterTopology), 0) + + diags.Append(ds...) + + if !ds.HasError() && topologyPayload != nil { + payload.Plan.ClusterTopology = []*models.KibanaClusterTopologyElement{topologyPayload} + } + + return &payload, diags +} + +func KibanaPayload(ctx context.Context, kibanaObj types.Object, template *models.DeploymentTemplateInfoV2) (*models.KibanaPayload, diag.Diagnostics) { + var kibanaTF *KibanaTF + + var diags diag.Diagnostics + + if diags = tfsdk.ValueAs(ctx, kibanaObj, &kibanaTF); diags.HasError() { + return nil, diags + } + + if kibanaTF == nil { + return nil, nil + } + + templatePlayload := kibanaResource(template) + + if templatePlayload == nil { + diags.AddError("kibana payload error", "kibana specified but deployment template is not configured for it. Use a different template if you wish to add kibana") + return nil, diags + } + + payload, diags := kibanaTF.Payload(ctx, *templatePlayload) + + if diags.HasError() { + return nil, diags + } + + return payload, nil +} + +// kibanaResource returns the KibanaPayload from a deployment +// template or an empty version of the payload. +func kibanaResource(res *models.DeploymentTemplateInfoV2) *models.KibanaPayload { + if res == nil || len(res.DeploymentTemplate.Resources.Kibana) == 0 { + return nil + } + return res.DeploymentTemplate.Resources.Kibana[0] +} diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana_config.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_config.go new file mode 100644 index 000000000..bc2684ecc --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_config.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "bytes" + "encoding/json" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v1" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +type KibanaConfig v1.KibanaConfig + +func readKibanaConfig(in *models.KibanaConfiguration) (*KibanaConfig, error) { + var cfg KibanaConfig + + if in.UserSettingsYaml != "" { + cfg.UserSettingsYaml = &in.UserSettingsYaml + } + + if in.UserSettingsOverrideYaml != "" { + cfg.UserSettingsOverrideYaml = &in.UserSettingsOverrideYaml + } + + if o := in.UserSettingsJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsJson = ec.String(string(b)) + } + } + + if o := in.UserSettingsOverrideJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsOverrideJson = ec.String(string(b)) + } + } + + if in.DockerImage != "" { + cfg.DockerImage = &in.DockerImage + } + + if cfg == (KibanaConfig{}) { + return nil, nil + } + + return &cfg, nil +} + +func kibanaConfigPayload(cfg *v1.KibanaConfigTF, model *models.KibanaConfiguration) diag.Diagnostics { + var diags 
diag.Diagnostics + + if cfg == nil { + return nil + } + + if cfg.UserSettingsJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsJson.Value), &model.UserSettingsJSON); err != nil { + diags.AddError("failed expanding kibana user_settings_json", err.Error()) + } + } + + if cfg.UserSettingsOverrideJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsOverrideJson.Value), &model.UserSettingsOverrideJSON); err != nil { + diags.AddError("failed expanding kibana user_settings_override_json", err.Error()) + } + } + + if !cfg.UserSettingsYaml.IsNull() { + model.UserSettingsYaml = cfg.UserSettingsYaml.Value + } + + if !cfg.UserSettingsOverrideYaml.IsNull() { + model.UserSettingsOverrideYaml = cfg.UserSettingsOverrideYaml.Value + } + + if !cfg.DockerImage.IsNull() { + model.DockerImage = cfg.DockerImage.Value + } + + return diags +} diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana_payload_test.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_payload_test.go new file mode 100644 index 000000000..5ce453f6c --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_payload_test.go @@ -0,0 +1,250 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" +) + +func Test_KibanaPayload(t *testing.T) { + tplPath := "../../testdata/template-aws-io-optimized-v2.json" + tpl := func() *models.DeploymentTemplateInfoV2 { + return testutil.ParseDeploymentTemplate(t, tplPath) + } + type args struct { + kibana *Kibana + tpl *models.DeploymentTemplateInfoV2 + } + tests := []struct { + name string + args args + want *models.KibanaPayload + diags diag.Diagnostics + }{ + { + name: "returns nil when there's no resources", + }, + { + name: "parses a kibana resource with topology", + args: args{ + tpl: tpl(), + kibana: &Kibana{ + RefId: ec.String("main-kibana"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("2g"), + ZoneCount: 1, + }, + }, + want: &models.KibanaPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + }, + }, + }, + }, + }, + { + name: "parses a kibana resource with incorrect instance_configuration_id", + args: args{ + tpl: tpl(), + kibana: &Kibana{ + RefId: ec.String("main-kibana"), + ResourceId: 
&mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("gcp.some.config"), + Size: ec.String("2g"), + ZoneCount: 1, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("kibana topology payload error", `kibana topology: invalid instance_configuration_id: "gcp.some.config" doesn't match any of the deployment template instance configurations`) + return diags + }(), + }, + { + name: "parses a kibana resource without topology", + args: args{ + tpl: tpl(), + kibana: &Kibana{ + RefId: ec.String("main-kibana"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + }, + }, + want: &models.KibanaPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + { + name: "parses a kibana resource with a topology but no instance_configuration_id", + args: args{ + tpl: tpl(), + kibana: &Kibana{ + RefId: ec.String("main-kibana"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Size: ec.String("4g"), + }, + }, + want: &models.KibanaPayload{ + + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), 
+ Value: ec.Int32(4096), + }, + }, + }, + }, + }, + }, + { + name: "parses a kibana resource with topology and settings", + args: args{ + tpl: tpl(), + kibana: &Kibana{ + RefId: ec.String("secondary-kibana"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Config: &KibanaConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: override"), + UserSettingsJson: ec.String(`{"some.setting":"value"}`), + UserSettingsOverrideJson: ec.String(`{"some.setting":"override"}`), + }, + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("4g"), + ZoneCount: 1, + }, + }, + want: &models.KibanaPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("secondary-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{ + UserSettingsYaml: "some.setting: value", + UserSettingsOverrideYaml: "some.setting: override", + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "override", + }, + }, + ClusterTopology: []*models.KibanaClusterTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + }}, + }, + }, + }, + { + name: "tries to parse an kibana resource when the template doesn't have a kibana instance set.", + args: args{ + tpl: nil, + kibana: &Kibana{ + RefId: ec.String("tertiary-kibana"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("kibana 
payload error", "kibana specified but deployment template is not configured for it. Use a different template if you wish to add kibana") + return diags + }(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var kibana types.Object + diags := tfsdk.ValueFrom(context.Background(), tt.args.kibana, KibanaSchema().FrameworkType(), &kibana) + assert.Nil(t, diags) + + if got, diags := KibanaPayload(context.Background(), kibana, tt.args.tpl); tt.diags != nil { + assert.Equal(t, tt.diags, diags) + } else { + assert.Nil(t, diags) + assert.Equal(t, tt.want, got) + } + }) + } +} diff --git a/ec/ecresource/deploymentresource/kibana_flatteners_test.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_read_test.go similarity index 57% rename from ec/ecresource/deploymentresource/kibana_flatteners_test.go rename to ec/ecresource/deploymentresource/kibana/v2/kibana_read_test.go index 0cd409805..e1144ef48 100644 --- a/ec/ecresource/deploymentresource/kibana_flatteners_test.go +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_read_test.go @@ -15,11 +15,14 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package v2 import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" "github.com/elastic/cloud-sdk-go/pkg/api/mock" @@ -27,20 +30,19 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) -func Test_flattenKibanaResources(t *testing.T) { +func Test_ReadKibana(t *testing.T) { type args struct { - in []*models.KibanaResourceInfo - name string + in []*models.KibanaResourceInfo } tests := []struct { name string args args - want []interface{} + want *Kibana }{ { name: "empty resource list returns empty list", args: args{in: []*models.KibanaResourceInfo{}}, - want: []interface{}{}, + want: nil, }, { name: "empty current plan returns empty list", @@ -53,48 +55,11 @@ func Test_flattenKibanaResources(t *testing.T) { }, }, }}, - want: []interface{}{}, + want: nil, }, { name: "parses the kibana resource", args: args{in: []*models.KibanaResourceInfo{ - { - Region: ec.String("some-region"), - RefID: ec.String("main-kibana"), - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Info: &models.KibanaClusterInfo{ - ClusterID: &mock.ValidClusterID, - ClusterName: ec.String("some-kibana-name"), - Region: "some-region", - Status: ec.String("started"), - Metadata: &models.ClusterMetadataInfo{ - Endpoint: "kibanaresource.cloud.elastic.co", - Ports: &models.ClusterMetadataPortInfo{ - HTTP: ec.Int32(9200), - HTTPS: ec.Int32(9243), - }, - }, - PlanInfo: &models.KibanaClusterPlansInfo{ - Current: &models.KibanaClusterPlanInfo{ - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r4", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - }, - }, { Region: ec.String("some-region"), RefID: 
ec.String("main-kibana"), @@ -176,50 +141,35 @@ func Test_flattenKibanaResources(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://kibanaresource.cloud.elastic.co:9200", - "https_endpoint": "https://kibanaresource.cloud.elastic.co:9243", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }, - }, - }, - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://kibanaresource.cloud.elastic.co:9200", - "https_endpoint": "https://kibanaresource.cloud.elastic.co:9243", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: override", - "user_settings_json": `{"some.setting":"value"}`, - "user_settings_override_json": `{"some.setting":"override"}`, - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }}, + want: &Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://kibanaresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://kibanaresource.cloud.elastic.co:9243"), + Config: &KibanaConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: override"), + UserSettingsJson: ec.String(`{"some.setting":"value"}`), + UserSettingsOverrideJson: ec.String(`{"some.setting":"override"}`), 
}, + InstanceConfigurationId: ec.String("aws.kibana.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenKibanaResources(tt.args.in, tt.args.name) - assert.Equal(t, tt.want, got) + kibana, err := ReadKibanas(tt.args.in) + assert.Nil(t, err) + assert.Equal(t, tt.want, kibana) + + var kibanaTF types.Object + diags := tfsdk.ValueFrom(context.Background(), kibana, KibanaSchema().FrameworkType(), &kibanaTF) + assert.Nil(t, diags) }) } } diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana_topology.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_topology.go new file mode 100644 index 000000000..9e44cd7b2 --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_topology.go @@ -0,0 +1,142 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "fmt" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" +) + +const ( + minimumKibanaSize = 1024 +) + +func readKibanaTopology(in *models.KibanaClusterTopologyElement) (*topologyv1.Topology, error) { + var top topologyv1.Topology + + if in.InstanceConfigurationID != "" { + top.InstanceConfigurationId = &in.InstanceConfigurationID + } + + if in.Size != nil { + top.Size = ec.String(util.MemoryToState(*in.Size.Value)) + top.SizeResource = ec.String(*in.Size.Resource) + } + + top.ZoneCount = int(in.ZoneCount) + + return &top, nil +} + +type KibanaTopologiesTF []*topologyv1.TopologyTF + +func readKibanaTopologies(in []*models.KibanaClusterTopologyElement) (topologyv1.Topologies, error) { + if len(in) == 0 { + return nil, nil + } + + tops := make(topologyv1.Topologies, 0, len(in)) + for _, model := range in { + if model.Size == nil || model.Size.Value == nil || *model.Size.Value == 0 { + continue + } + + top, err := readKibanaTopology(model) + if err != nil { + return nil, err + } + + tops = append(tops, *top) + } + + return tops, nil +} + +// defaultKibanaTopology iterates over all the templated topology elements and +// sets the size to the default when the template size is greater than the +// local terraform default, the same is done on the ZoneCount.
+func defaultKibanaTopology(topology []*models.KibanaClusterTopologyElement) []*models.KibanaClusterTopologyElement { + for _, t := range topology { + if *t.Size.Value > minimumKibanaSize { + t.Size.Value = ec.Int32(minimumKibanaSize) + } + if t.ZoneCount > utils.MinimumZoneCount { + t.ZoneCount = utils.MinimumZoneCount + } + } + + return topology +} + +func kibanaTopologyPayload(ctx context.Context, topology v1.TopologyTF, planModels []*models.KibanaClusterTopologyElement, index int) (*models.KibanaClusterTopologyElement, diag.Diagnostics) { + + icID := topology.InstanceConfigurationId.Value + + // When a topology element is set but no instance_configuration_id + // is set, then obtain the instance_configuration_id from the topology + // element. + if icID == "" && index < len(planModels) { + icID = planModels[index].InstanceConfigurationID + } + + size, err := converters.ParseTopologySizeTF(topology.Size, topology.SizeResource) + + var diags diag.Diagnostics + if err != nil { + diags.AddError("size parsing error", err.Error()) + return nil, diags + } + + elem, err := matchKibanaTopology(icID, planModels) + if err != nil { + diags.AddError("kibana topology payload error", err.Error()) + return nil, diags + } + + if size != nil { + elem.Size = size + } + + if topology.ZoneCount.Value > 0 { + elem.ZoneCount = int32(topology.ZoneCount.Value) + } + + return elem, nil +} + +func matchKibanaTopology(id string, topologies []*models.KibanaClusterTopologyElement) (*models.KibanaClusterTopologyElement, error) { + for _, t := range topologies { + if t.InstanceConfigurationID == id { + return t, nil + } + } + return nil, fmt.Errorf( + `kibana topology: invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, + id, + ) +} diff --git a/ec/ecresource/deploymentresource/kibana/v2/schema.go b/ec/ecresource/deploymentresource/kibana/v2/schema.go new file mode 100644 index 000000000..d15a75119 --- /dev/null +++ 
b/ec/ecresource/deploymentresource/kibana/v2/schema.go @@ -0,0 +1,147 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func KibanaSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Kibana resource definition", + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + resource.UseStateForUnknown(), + }, + Computed: true, + Optional: true, + }, + "ref_id": { + Type: types.StringType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-kibana"}), + resource.UseStateForUnknown(), + }, + Computed: true, + Optional: true, + }, + "resource_id": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), 
+ }, + }, + "region": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "http_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "https_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "instance_configuration_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + resource.UseStateForUnknown(), + }, + Computed: true, + Optional: true, + }, + "zone_count": { + Type: types.Int64Type, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "config": { + Optional: true, + // TODO + // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + Description: `Optionally define the Kibana configuration options for the Kibana Server`, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the Kibana nodes will use. 
Note that this field will only work for internal users only.", + Optional: true, + }, + "user_settings_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + }), + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/kibana_expanders.go b/ec/ecresource/deploymentresource/kibana_expanders.go deleted file mode 100644 index b5c568513..000000000 --- a/ec/ecresource/deploymentresource/kibana_expanders.go +++ /dev/null @@ -1,196 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// expandKibanaResources expands the flattened kibana resources into its models. -func expandKibanaResources(kibanas []interface{}, tpl *models.KibanaPayload) ([]*models.KibanaPayload, error) { - if len(kibanas) == 0 { - return nil, nil - } - - if tpl == nil { - return nil, errors.New("kibana specified but deployment template is not configured for it. 
Use a different template if you wish to add kibana") - } - - result := make([]*models.KibanaPayload, 0, len(kibanas)) - for _, raw := range kibanas { - resResource, err := expandKibanaResource(raw, tpl) - if err != nil { - return nil, err - } - result = append(result, resResource) - } - - return result, nil -} - -func expandKibanaResource(raw interface{}, res *models.KibanaPayload) (*models.KibanaPayload, error) { - kibana := raw.(map[string]interface{}) - - if esRefID, ok := kibana["elasticsearch_cluster_ref_id"]; ok { - res.ElasticsearchClusterRefID = ec.String(esRefID.(string)) - } - - if refID, ok := kibana["ref_id"]; ok { - res.RefID = ec.String(refID.(string)) - } - - if region, ok := kibana["region"]; ok { - if r := region.(string); r != "" { - res.Region = ec.String(r) - } - } - - if cfg, ok := kibana["config"]; ok { - if err := expandKibanaConfig(cfg, res.Plan.Kibana); err != nil { - return nil, err - } - } - - if rt, ok := kibana["topology"]; ok && len(rt.([]interface{})) > 0 { - topology, err := expandKibanaTopology(rt, res.Plan.ClusterTopology) - if err != nil { - return nil, err - } - res.Plan.ClusterTopology = topology - } else { - res.Plan.ClusterTopology = defaultKibanaTopology(res.Plan.ClusterTopology) - } - - return res, nil -} - -func expandKibanaTopology(raw interface{}, topologies []*models.KibanaClusterTopologyElement) ([]*models.KibanaClusterTopologyElement, error) { - var rawTopologies = raw.([]interface{}) - var res = make([]*models.KibanaClusterTopologyElement, 0, len(rawTopologies)) - for i, rawTop := range rawTopologies { - var topology = rawTop.(map[string]interface{}) - var icID string - if id, ok := topology["instance_configuration_id"]; ok { - icID = id.(string) - } - // When a topology element is set but no instance_configuration_id - // is set, then obtain the instance_configuration_id from the topology - // element. 
- if t := defaultKibanaTopology(topologies); icID == "" && len(t) >= i { - icID = t[i].InstanceConfigurationID - } - size, err := util.ParseTopologySize(topology) - if err != nil { - return nil, err - } - - elem, err := matchKibanaTopology(icID, topologies) - if err != nil { - return nil, err - } - if size != nil { - elem.Size = size - } - - if zones, ok := topology["zone_count"]; ok { - if z := zones.(int); z > 0 { - elem.ZoneCount = int32(z) - } - } - - res = append(res, elem) - } - - return res, nil -} - -func expandKibanaConfig(raw interface{}, res *models.KibanaConfiguration) error { - for _, rawCfg := range raw.([]interface{}) { - var cfg = rawCfg.(map[string]interface{}) - if settings, ok := cfg["user_settings_json"]; ok && settings != nil { - if s, ok := settings.(string); ok && s != "" { - if err := json.Unmarshal([]byte(s), &res.UserSettingsJSON); err != nil { - return fmt.Errorf("failed expanding kibana user_settings_json: %w", err) - } - } - } - if settings, ok := cfg["user_settings_override_json"]; ok && settings != nil { - if s, ok := settings.(string); ok && s != "" { - if err := json.Unmarshal([]byte(s), &res.UserSettingsOverrideJSON); err != nil { - return fmt.Errorf("failed expanding kibana user_settings_override_json: %w", err) - } - } - } - if settings, ok := cfg["user_settings_yaml"]; ok { - res.UserSettingsYaml = settings.(string) - } - if settings, ok := cfg["user_settings_override_yaml"]; ok { - res.UserSettingsOverrideYaml = settings.(string) - } - - if v, ok := cfg["docker_image"]; ok { - res.DockerImage = v.(string) - } - } - - return nil -} - -// defaultApmTopology iterates over all the templated topology elements and -// sets the size to the default when the template size is greater than the -// local terraform default, the same is done on the ZoneCount. 
-func defaultKibanaTopology(topology []*models.KibanaClusterTopologyElement) []*models.KibanaClusterTopologyElement { - for _, t := range topology { - if *t.Size.Value > minimumKibanaSize { - t.Size.Value = ec.Int32(minimumKibanaSize) - } - if t.ZoneCount > minimumZoneCount { - t.ZoneCount = minimumZoneCount - } - } - - return topology -} - -func matchKibanaTopology(id string, topologies []*models.KibanaClusterTopologyElement) (*models.KibanaClusterTopologyElement, error) { - for _, t := range topologies { - if t.InstanceConfigurationID == id { - return t, nil - } - } - return nil, fmt.Errorf( - `kibana topology: invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, - id, - ) -} - -// kibanaResource returns the KibanaPayload from a deployment -// template or an empty version of the payload. -func kibanaResource(res *models.DeploymentTemplateInfoV2) *models.KibanaPayload { - if len(res.DeploymentTemplate.Resources.Kibana) == 0 { - return nil - } - return res.DeploymentTemplate.Resources.Kibana[0] -} diff --git a/ec/ecresource/deploymentresource/kibana_expanders_test.go b/ec/ecresource/deploymentresource/kibana_expanders_test.go deleted file mode 100644 index 02a0a8f17..000000000 --- a/ec/ecresource/deploymentresource/kibana_expanders_test.go +++ /dev/null @@ -1,261 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" -) - -func Test_expandKibanaResources(t *testing.T) { - tplPath := "testdata/template-aws-io-optimized-v2.json" - tpl := func() *models.KibanaPayload { - return kibanaResource(parseDeploymentTemplate(t, - tplPath, - )) - } - type args struct { - ess []interface{} - tpl *models.KibanaPayload - } - tests := []struct { - name string - args args - want []*models.KibanaPayload - err error - }{ - { - name: "returns nil when there's no resources", - }, - { - name: "parses a kibana resource with topology", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "2g", - "zone_count": 1, - }}, - }, - }, - }, - want: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: 
ec.Int32(2048), - }, - }, - }, - }, - }, - }, - }, - { - name: "parses a kibana resource with incorrect instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.some.config", - "size": "2g", - "zone_count": 1, - }}, - }, - }, - }, - err: errors.New(`kibana topology: invalid instance_configuration_id: "gcp.some.config" doesn't match any of the deployment template instance configurations`), - }, - { - name: "parses a kibana resource without topology", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - }, - }, - }, - want: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - }, - { - name: "parses a kibana resource with a topology but no instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "size": "4g", - }}, - }, - }, - }, - want: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-kibana"), - 
Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - }, - }, - }, - }, - }, - }, - { - name: "parses a kibana resource with topology and settings", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "secondary-kibana", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: override", - "user_settings_json": `{"some.setting":"value"}`, - "user_settings_override_json": `{"some.setting":"override"}`, - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "4g", - "zone_count": 1, - }}, - }}, - }, - want: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("secondary-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{ - UserSettingsYaml: "some.setting: value", - UserSettingsOverrideYaml: "some.setting: override", - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "override", - }, - }, - ClusterTopology: []*models.KibanaClusterTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - }}, - }, - }, - }, - }, - { - name: "tries to parse an kibana resource when the template doesn't have a kibana instance set.", - args: args{ - tpl: nil, - ess: []interface{}{map[string]interface{}{ - "ref_id": "tertiary-kibana", - 
"elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - err: errors.New("kibana specified but deployment template is not configured for it. Use a different template if you wish to add kibana"), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := expandKibanaResources(tt.args.ess, tt.args.tpl) - if !assert.Equal(t, tt.err, err) { - t.Error(err) - } - - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/ecresource/deploymentresource/kibana_flatteners.go b/ec/ecresource/deploymentresource/kibana_flatteners.go deleted file mode 100644 index fb9a2ff87..000000000 --- a/ec/ecresource/deploymentresource/kibana_flatteners.go +++ /dev/null @@ -1,134 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "bytes" - "encoding/json" - - "github.com/elastic/cloud-sdk-go/pkg/models" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// flattenKibanaResources takes the kibana resource models and returns them flattened. -func flattenKibanaResources(in []*models.KibanaResourceInfo, name string) []interface{} { - result := make([]interface{}, 0, len(in)) - for _, res := range in { - m := make(map[string]interface{}) - if util.IsCurrentKibanaPlanEmpty(res) || isKibanaResourceStopped(res) { - continue - } - - if res.RefID != nil && *res.RefID != "" { - m["ref_id"] = *res.RefID - } - - if res.Info.ClusterID != nil && *res.Info.ClusterID != "" { - m["resource_id"] = *res.Info.ClusterID - } - - if res.Region != nil { - m["region"] = *res.Region - } - - plan := res.Info.PlanInfo.Current.Plan - if topology := flattenKibanaTopology(plan); len(topology) > 0 { - m["topology"] = topology - } - - if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID - } - - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v - } - - if c := flattenKibanaConfig(plan.Kibana); len(c) > 0 { - m["config"] = c - } - - result = append(result, m) - } - - return result -} - -func flattenKibanaTopology(plan *models.KibanaClusterPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) - for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) - if topology.Size == nil || topology.Size.Value == nil || *topology.Size.Value == 0 { - continue - } - - if topology.InstanceConfigurationID != "" { - m["instance_configuration_id"] = topology.InstanceConfigurationID - } - - if topology.Size != nil { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource - - } - - m["zone_count"] = topology.ZoneCount - - result = append(result, m) - } - - return result -} - -func 
flattenKibanaConfig(cfg *models.KibanaConfiguration) []interface{} { - var m = make(map[string]interface{}) - if cfg == nil { - return nil - } - - if cfg.UserSettingsYaml != "" { - m["user_settings_yaml"] = cfg.UserSettingsYaml - } - - if cfg.UserSettingsOverrideYaml != "" { - m["user_settings_override_yaml"] = cfg.UserSettingsOverrideYaml - } - - if o := cfg.UserSettingsJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_json"] = string(b) - } - } - - if o := cfg.UserSettingsOverrideJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_override_json"] = string(b) - } - } - - if cfg.DockerImage != "" { - m["docker_image"] = cfg.DockerImage - } - - if len(m) == 0 { - return nil - } - - return []interface{}{m} -} diff --git a/ec/ecresource/deploymentresource/observability.go b/ec/ecresource/deploymentresource/observability.go deleted file mode 100644 index 34b34e82f..000000000 --- a/ec/ecresource/deploymentresource/observability.go +++ /dev/null @@ -1,117 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "fmt" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" -) - -// flattenObservability parses a deployment's observability settings. -func flattenObservability(settings *models.DeploymentSettings) []interface{} { - if settings == nil || settings.Observability == nil { - return nil - } - - var m = make(map[string]interface{}) - - // We are only accepting a single deployment ID and refID for both logs and metrics. - // If either of them is not nil the deployment ID and refID will be filled. - if settings.Observability.Metrics != nil { - m["deployment_id"] = settings.Observability.Metrics.Destination.DeploymentID - m["ref_id"] = settings.Observability.Metrics.Destination.RefID - m["metrics"] = true - } - - if settings.Observability.Logging != nil { - m["deployment_id"] = settings.Observability.Logging.Destination.DeploymentID - m["ref_id"] = settings.Observability.Logging.Destination.RefID - m["logs"] = true - } - - if len(m) == 0 { - return nil - } - - return []interface{}{m} -} - -func expandObservability(raw []interface{}, client *api.API) (*models.DeploymentObservabilitySettings, error) { - if len(raw) == 0 { - return nil, nil - } - - var req models.DeploymentObservabilitySettings - - for _, rawObs := range raw { - var obs = rawObs.(map[string]interface{}) - - depID, ok := obs["deployment_id"] - if !ok { - return nil, nil - } - - refID, ok := obs["ref_id"] - if depID == "self" { - // For self monitoring, the refID is not mandatory - if !ok { - refID = "" - } - } else if !ok || refID == "" { - // Since ms-77, the refID is optional. 
- // To not break ECE users with older versions, we still pre-calculate the refID here - params := deploymentapi.PopulateRefIDParams{ - Kind: util.Elasticsearch, - API: client, - DeploymentID: depID.(string), - RefID: ec.String(""), - } - - if err := deploymentapi.PopulateRefID(params); err != nil { - return nil, fmt.Errorf("observability ref_id auto discovery: %w", err) - } - - refID = *params.RefID - } - - if logging := obs["logs"]; logging.(bool) { - req.Logging = &models.DeploymentLoggingSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: ec.String(depID.(string)), - RefID: refID.(string), - }, - } - } - - if metrics := obs["metrics"]; metrics.(bool) { - req.Metrics = &models.DeploymentMetricsSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: ec.String(depID.(string)), - RefID: refID.(string), - }, - } - } - } - - return &req, nil -} diff --git a/ec/ecresource/deploymentresource/observability/v1/observability.go b/ec/ecresource/deploymentresource/observability/v1/observability.go new file mode 100644 index 000000000..6075e336b --- /dev/null +++ b/ec/ecresource/deploymentresource/observability/v1/observability.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ObservabilityTF struct { + DeploymentId types.String `tfsdk:"deployment_id"` + RefId types.String `tfsdk:"ref_id"` + Logs types.Bool `tfsdk:"logs"` + Metrics types.Bool `tfsdk:"metrics"` +} + +type Observability struct { + DeploymentId *string `tfsdk:"deployment_id"` + RefId *string `tfsdk:"ref_id"` + Logs bool `tfsdk:"logs"` + Metrics bool `tfsdk:"metrics"` +} + +type Observabilities []Observability diff --git a/ec/ecresource/deploymentresource/observability/v1/schema.go b/ec/ecresource/deploymentresource/observability/v1/schema.go new file mode 100644 index 000000000..20e283ddf --- /dev/null +++ b/ec/ecresource/deploymentresource/observability/v1/schema.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v1 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func ObservabilitySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional observability settings. Ship logs and metrics to a dedicated deployment.", + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "deployment_id": { + Type: types.StringType, + Required: true, + }, + "ref_id": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "logs": { + Type: types.BoolType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: true}), + resource.UseStateForUnknown(), + }, + }, + "metrics": { + Type: types.BoolType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: true}), + resource.UseStateForUnknown(), + }, + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/observability/v2/observability.go b/ec/ecresource/deploymentresource/observability/v2/observability.go new file mode 100644 index 000000000..3a5006a5d --- /dev/null +++ b/ec/ecresource/deploymentresource/observability/v2/observability.go @@ -0,0 +1,130 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v1" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ObservabilityTF = v1.ObservabilityTF + +type Observability = v1.Observability + +type Observabilities []Observability + +func ObservabilityPayload(ctx context.Context, obsObj types.Object, client *api.API) (*models.DeploymentObservabilitySettings, diag.Diagnostics) { + var observability *ObservabilityTF + + if diags := tfsdk.ValueAs(ctx, obsObj, &observability); diags.HasError() { + return nil, nil + } + + if observability == nil { + return nil, nil + } + + var payload models.DeploymentObservabilitySettings + + if observability.DeploymentId.Value == "" { + return nil, nil + } + + refID := observability.RefId.Value + + if observability.DeploymentId.Value != "self" && refID == "" { + // Since ms-77, the refID is optional. 
+ // To not break ECE users with older versions, we still pre-calculate the refID here + params := deploymentapi.PopulateRefIDParams{ + Kind: util.Elasticsearch, + API: client, + DeploymentID: observability.DeploymentId.Value, + RefID: ec.String(""), + } + + if err := deploymentapi.PopulateRefID(params); err != nil { + var diags diag.Diagnostics + diags.AddError("observability ref_id auto discovery", err.Error()) + return nil, diags + } + + refID = *params.RefID + } + + if observability.Logs.Value { + payload.Logging = &models.DeploymentLoggingSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: ec.String(observability.DeploymentId.Value), + RefID: refID, + }, + } + } + + if observability.Metrics.Value { + payload.Metrics = &models.DeploymentMetricsSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: ec.String(observability.DeploymentId.Value), + RefID: refID, + }, + } + } + + return &payload, nil +} + +func ReadObservability(in *models.DeploymentSettings) (*Observability, error) { + if in == nil || in.Observability == nil { + return nil, nil + } + + var obs Observability + + // We are only accepting a single deployment ID and refID for both logs and metrics. + // If either of them is not nil the deployment ID and refID will be filled. 
+ if in.Observability.Metrics != nil { + if in.Observability.Metrics.Destination.DeploymentID != nil { + obs.DeploymentId = in.Observability.Metrics.Destination.DeploymentID + } + + obs.RefId = &in.Observability.Metrics.Destination.RefID + obs.Metrics = true + } + + if in.Observability.Logging != nil { + if in.Observability.Logging.Destination.DeploymentID != nil { + obs.DeploymentId = in.Observability.Logging.Destination.DeploymentID + } + obs.RefId = &in.Observability.Logging.Destination.RefID + obs.Logs = true + } + + if obs == (Observability{}) { + return nil, nil + } + + return &obs, nil +} diff --git a/ec/ecresource/deploymentresource/observability_test.go b/ec/ecresource/deploymentresource/observability/v2/observability_payload_test.go similarity index 61% rename from ec/ecresource/deploymentresource/observability_test.go rename to ec/ecresource/deploymentresource/observability/v2/observability_payload_test.go index f7119f4ab..f4fd06fd7 100644 --- a/ec/ecresource/deploymentresource/observability_test.go +++ b/ec/ecresource/deploymentresource/observability/v2/observability_payload_test.go @@ -15,11 +15,14 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package v2 import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" "github.com/elastic/cloud-sdk-go/pkg/api" @@ -28,100 +31,9 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) -func TestFlattenObservability(t *testing.T) { +func Test_observabilityPayload(t *testing.T) { type args struct { - settings *models.DeploymentSettings - } - tests := []struct { - name string - args args - want []interface{} - }{ - { - name: "flattens no observability settings when empty", - args: args{}, - }, - { - name: "flattens no observability settings when empty", - args: args{settings: &models.DeploymentSettings{}}, - }, - { - name: "flattens no observability settings when empty", - args: args{settings: &models.DeploymentSettings{Observability: &models.DeploymentObservabilitySettings{}}}, - }, - { - name: "flattens observability settings", - args: args{settings: &models.DeploymentSettings{ - Observability: &models.DeploymentObservabilitySettings{ - Logging: &models.DeploymentLoggingSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - }, - }}, - want: []interface{}{map[string]interface{}{ - "deployment_id": &mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "logs": true, - }}, - }, - { - name: "flattens observability settings", - args: args{settings: &models.DeploymentSettings{ - Observability: &models.DeploymentObservabilitySettings{ - Metrics: &models.DeploymentMetricsSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - }, - }}, - want: []interface{}{map[string]interface{}{ - "deployment_id": &mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "metrics": true, - }}, - }, - { - name: "flattens observability settings", - 
args: args{settings: &models.DeploymentSettings{ - Observability: &models.DeploymentObservabilitySettings{ - Logging: &models.DeploymentLoggingSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - Metrics: &models.DeploymentMetricsSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - }, - }}, - want: []interface{}{map[string]interface{}{ - "deployment_id": &mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "logs": true, - "metrics": true, - }}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := flattenObservability(tt.args.settings) - assert.Equal(t, tt.want, got) - }) - } -} - -func TestExpandObservability(t *testing.T) { - type args struct { - v []interface{} + observability *Observability *api.API } tests := []struct { @@ -136,12 +48,12 @@ func TestExpandObservability(t *testing.T) { { name: "expands all observability settings with given refID", args: args{ - v: []interface{}{map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "metrics": true, - "logs": true, - }}, + observability: &Observability{ + DeploymentId: &mock.ValidClusterID, + RefId: ec.String("main-elasticsearch"), + Metrics: true, + Logs: true, + }, }, want: &models.DeploymentObservabilitySettings{ Logging: &models.DeploymentLoggingSettings{ @@ -175,11 +87,11 @@ func TestExpandObservability(t *testing.T) { }), ), ), - v: []interface{}{map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "metrics": true, - "logs": true, - }}, + observability: &Observability{ + DeploymentId: &mock.ValidClusterID, + Metrics: true, + Logs: true, + }, }, want: &models.DeploymentObservabilitySettings{ Logging: &models.DeploymentLoggingSettings{ @@ -213,11 +125,11 @@ func TestExpandObservability(t *testing.T) { }), ), ), - v: 
[]interface{}{map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "metrics": false, - "logs": true, - }}, + observability: &Observability{ + DeploymentId: &mock.ValidClusterID, + Metrics: false, + Logs: true, + }, }, want: &models.DeploymentObservabilitySettings{ Logging: &models.DeploymentLoggingSettings{ @@ -245,11 +157,11 @@ func TestExpandObservability(t *testing.T) { }), ), ), - v: []interface{}{map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "metrics": true, - "logs": false, - }}, + observability: &Observability{ + DeploymentId: &mock.ValidClusterID, + Metrics: true, + Logs: false, + }, }, want: &models.DeploymentObservabilitySettings{ Metrics: &models.DeploymentMetricsSettings{ @@ -277,11 +189,11 @@ func TestExpandObservability(t *testing.T) { }), ), ), - v: []interface{}{map[string]interface{}{ - "deployment_id": "self", - "metrics": true, - "logs": false, - }}, + observability: &Observability{ + DeploymentId: ec.String("self"), + Metrics: true, + Logs: false, + }, }, want: &models.DeploymentObservabilitySettings{ Metrics: &models.DeploymentMetricsSettings{ @@ -309,12 +221,12 @@ func TestExpandObservability(t *testing.T) { }), ), ), - v: []interface{}{map[string]interface{}{ - "deployment_id": "self", - "ref_id": "main-elasticsearch", - "metrics": true, - "logs": false, - }}, + observability: &Observability{ + DeploymentId: ec.String("self"), + RefId: ec.String("main-elasticsearch"), + Metrics: true, + Logs: false, + }, }, want: &models.DeploymentObservabilitySettings{ Metrics: &models.DeploymentMetricsSettings{ @@ -328,7 +240,12 @@ func TestExpandObservability(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, _ := expandObservability(tt.args.v, tt.args.API) + var observability types.Object + diags := tfsdk.ValueFrom(context.Background(), tt.args.observability, ObservabilitySchema().FrameworkType(), &observability) + assert.Nil(t, diags) + + got, diags := 
ObservabilityPayload(context.Background(), observability, tt.args.API) + assert.Nil(t, diags) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecresource/deploymentresource/observability/v2/observability_read_test.go b/ec/ecresource/deploymentresource/observability/v2/observability_read_test.go new file mode 100644 index 000000000..31c573713 --- /dev/null +++ b/ec/ecresource/deploymentresource/observability/v2/observability_read_test.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" +) + +func Test_readObservability(t *testing.T) { + type args struct { + settings *models.DeploymentSettings + } + tests := []struct { + name string + args args + want *Observability + }{ + { + name: "flattens no observability settings when empty", + args: args{}, + }, + { + name: "flattens no observability settings when empty", + args: args{settings: &models.DeploymentSettings{}}, + }, + { + name: "flattens no observability settings when empty", + args: args{settings: &models.DeploymentSettings{Observability: &models.DeploymentObservabilitySettings{}}}, + }, + { + name: "flattens observability settings", + args: args{settings: &models.DeploymentSettings{ + Observability: &models.DeploymentObservabilitySettings{ + Logging: &models.DeploymentLoggingSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + }, + }}, + want: &Observability{ + DeploymentId: &mock.ValidClusterID, + RefId: ec.String("main-elasticsearch"), + Logs: true, + }, + }, + { + name: "flattens observability settings", + args: args{settings: &models.DeploymentSettings{ + Observability: &models.DeploymentObservabilitySettings{ + Metrics: &models.DeploymentMetricsSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + }, + }}, + want: &Observability{ + DeploymentId: &mock.ValidClusterID, + RefId: ec.String("main-elasticsearch"), + Metrics: true, + }, + }, + { + name: "flattens observability settings", + args: args{settings: &models.DeploymentSettings{ + Observability: 
&models.DeploymentObservabilitySettings{ + Logging: &models.DeploymentLoggingSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + Metrics: &models.DeploymentMetricsSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + }, + }}, + want: &Observability{ + DeploymentId: &mock.ValidClusterID, + RefId: ec.String("main-elasticsearch"), + Logs: true, + Metrics: true, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + observability, err := ReadObservability(tt.args.settings) + assert.Nil(t, err) + assert.Equal(t, tt.want, observability) + + var observabilityTF types.Object + diags := tfsdk.ValueFrom(context.Background(), observability, ObservabilitySchema().FrameworkType(), &observabilityTF) + assert.Nil(t, diags) + }) + } +} diff --git a/ec/ecresource/deploymentresource/observability/v2/schema.go b/ec/ecresource/deploymentresource/observability/v2/schema.go new file mode 100644 index 000000000..21dcd4b42 --- /dev/null +++ b/ec/ecresource/deploymentresource/observability/v2/schema.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func ObservabilitySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional observability settings. Ship logs and metrics to a dedicated deployment.", + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "deployment_id": { + Type: types.StringType, + Required: true, + }, + "ref_id": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "logs": { + Type: types.BoolType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: true}), + resource.UseStateForUnknown(), + }, + }, + "metrics": { + Type: types.BoolType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: true}), + resource.UseStateForUnknown(), + }, + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/read.go b/ec/ecresource/deploymentresource/read.go index 8b0145df3..d27388c03 100644 --- a/ec/ecresource/deploymentresource/read.go +++ b/ec/ecresource/deploymentresource/read.go @@ -20,26 +20,61 @@ package deploymentresource import ( "context" "errors" + "fmt" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/blang/semver" "github.com/elastic/cloud-sdk-go/pkg/api/apierror" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" 
"github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deputil" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/esremoteclustersapi" "github.com/elastic/cloud-sdk-go/pkg/client/deployments" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" + deploymentv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" ) -// Read queries the remote deployment state and updates the local state. -func readResource(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) +func (r *Resource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + if !r.ready(&response.Diagnostics) { + return + } + + var curState deploymentv2.DeploymentTF + + diags := request.State.Get(ctx, &curState) + response.Diagnostics.Append(diags...) + + if response.Diagnostics.HasError() { + return + } + + var newState *deploymentv2.DeploymentTF + var err error + + // use state for the plan (there is no plan and config during Read) - otherwise we can get a non-empty plan output + if newState, diags = r.read(ctx, curState.Id.Value, &curState, curState, nil); err != nil { + response.Diagnostics.Append(diags...) + } + + if newState == nil { + response.State.RemoveResource(ctx) + } + + if newState != nil { + diags = response.State.Set(ctx, newState) + } + + response.Diagnostics.Append(diags...) 
+} + +func (r *Resource) read(ctx context.Context, id string, state *deploymentv2.DeploymentTF, plan deploymentv2.DeploymentTF, deploymentResources []*models.DeploymentResource) (*deploymentv2.DeploymentTF, diag.Diagnostics) { + var diags diag.Diagnostics - res, err := deploymentapi.Get(deploymentapi.GetParams{ - API: client, DeploymentID: d.Id(), + response, err := deploymentapi.Get(deploymentapi.GetParams{ + API: r.client, DeploymentID: id, QueryParams: deputil.QueryParams{ ShowSettings: true, ShowPlans: true, @@ -49,37 +84,86 @@ func readResource(_ context.Context, d *schema.ResourceData, meta interface{}) d }) if err != nil { if deploymentNotFound(err) { - d.SetId("") - return nil + diags.AddError("Deployment not found", err.Error()) + return nil, diags } - return diag.FromErr(multierror.NewPrefixed("failed reading deployment", err)) + diags.AddError("Deployment get error", err.Error()) + return nil, diags } - if !hasRunningResources(res) { - d.SetId("") - return nil + if !utils.HasRunningResources(response) { + return nil, nil + } + + if response.Resources == nil || len(response.Resources.Elasticsearch) == 0 { + diags.AddError("Get resource error", "cannot find Elasticsearch in response resources") + return nil, diags + } + + if response.Resources.Elasticsearch[0].Info.PlanInfo.Current != nil && response.Resources.Elasticsearch[0].Info.PlanInfo.Current.Plan != nil { + if err := checkVersion(response.Resources.Elasticsearch[0].Info.PlanInfo.Current.Plan.Elasticsearch.Version); err != nil { + diags.AddError("Get resource error", err.Error()) + return nil, diags + } + } + + refId := "" + + var elasticsearchPlan *elasticsearchv2.ElasticsearchTF + + if diags = tfsdk.ValueAs(ctx, plan.Elasticsearch, &elasticsearchPlan); diags.HasError() { + return nil, diags + } + + if elasticsearchPlan != nil { + refId = elasticsearchPlan.RefId.Value } - var diags diag.Diagnostics remotes, err := esremoteclustersapi.Get(esremoteclustersapi.GetParams{ - API: client, DeploymentID: 
d.Id(), - RefID: d.Get("elasticsearch.0.ref_id").(string), + API: r.client, DeploymentID: id, + RefID: refId, }) if err != nil { - diags = append(diags, diag.FromErr( - multierror.NewPrefixed("failed reading remote clusters", err), - )...) + diags.AddError("Remote clusters read error", err.Error()) + return nil, diags } - if remotes == nil { remotes = &models.RemoteResources{} } - if err := modelToState(d, res, *remotes); err != nil { - diags = append(diags, diag.FromErr(err)...) + deployment, err := deploymentv2.ReadDeployment(response, remotes, deploymentResources) + if err != nil { + diags.AddError("Deployment read error", err.Error()) + return nil, diags + } + + deployment.RequestId = plan.RequestId.Value + + deployment.SetCredentialsIfEmpty(state) + + deployment.ProcessSelfInObservability() + + deployment.NullifyNotUsedEsTopologies(ctx, elasticsearchPlan) + + // ReadDeployment returns empty config struct if there is no config, so we have to nullify it if plan doesn't contain it + // we use state for plan in Read and there is no state during import so we need to check elasticsearchPlan against nil + if elasticsearchPlan != nil && elasticsearchPlan.Config.IsNull() && deployment.Elasticsearch != nil && deployment.Elasticsearch.Config != nil && deployment.Elasticsearch.Config.IsEmpty() { + deployment.Elasticsearch.Config = nil + } + + var deploymentTF deploymentv2.DeploymentTF + + schema, diags := r.GetSchema(ctx) + + if diags.HasError() { + return nil, diags + } + + if diags := tfsdk.ValueFrom(ctx, deployment, schema.Type(), &deploymentTF); diags.HasError() { + return nil, diags } - return diags + return &deploymentTF, diags } func deploymentNotFound(err error) bool { @@ -93,3 +177,24 @@ func deploymentNotFound(err error) bool { // We also check for the case where a 403 is thrown for ESS. 
return apierror.IsRuntimeStatusCode(err, 403) } + +// Setting this variable here so that it is parsed at compile time in case +// any errors are thrown, they are at compile time not when the user runs it. +var minimumSupportedVersion = semver.MustParse("6.6.0") + +func checkVersion(version string) error { + v, err := semver.New(version) + + if err != nil { + return fmt.Errorf("unable to parse deployment version: %w", err) + } + + if v.LT(minimumSupportedVersion) { + return fmt.Errorf( + `invalid deployment version "%s": minimum supported version is "%s"`, + v.String(), minimumSupportedVersion.String(), + ) + } + + return nil +} diff --git a/ec/ecresource/deploymentresource/read_test.go b/ec/ecresource/deploymentresource/read_test.go deleted file mode 100644 index b54ff5523..000000000 --- a/ec/ecresource/deploymentresource/read_test.go +++ /dev/null @@ -1,199 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "context" - "testing" - - "github.com/go-openapi/runtime" - "github.com/stretchr/testify/assert" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/apierror" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/client/deployments" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_readResource(t *testing.T) { - tc500Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - wantTC500 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - - tc404Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - - wantTC404 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - wantTC404.SetId("") - - tc200Stopped := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - - wantTC200Stopped := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - wantTC200Stopped.SetId("") - - type args struct { - ctx context.Context - d *schema.ResourceData - meta interface{} - } - tests := []struct { - name string - args args - want diag.Diagnostics - wantRD *schema.ResourceData - }{ - { - name: "returns an error when it receives a 500", - args: args{ - d: tc500Err, - meta: api.NewMock(mock.NewErrorResponse(500, mock.APIError{ - 
Code: "some", Message: "message", - })), - }, - want: diag.Diagnostics{ - { - Severity: diag.Error, - Summary: "failed reading deployment: 1 error occurred:\n\t* api error: some: message\n\n", - }, - }, - wantRD: wantTC500, - }, - { - name: "returns nil and unsets the state when the error is known", - args: args{ - d: tc404Err, - meta: api.NewMock(mock.NewErrorResponse(404, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: nil, - wantRD: wantTC404, - }, - { - name: "returns nil and unsets the state when none of the deployment resources are running", - args: args{ - d: tc200Stopped, - meta: api.NewMock(mock.New200StructResponse(models.DeploymentGetResponse{ - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{{ - Info: &models.ElasticsearchClusterInfo{Status: ec.String("stopped")}, - }}, - }, - })), - }, - want: nil, - wantRD: wantTC200Stopped, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := readResource(tt.args.ctx, tt.args.d, tt.args.meta) - assert.Equal(t, tt.want, got) - var want interface{} - if tt.wantRD != nil { - if s := tt.wantRD.State(); s != nil { - want = s.Attributes - } - } - - var gotState interface{} - if s := tt.args.d.State(); s != nil { - gotState = s.Attributes - } - - assert.Equal(t, want, gotState) - }) - } -} - -func Test_deploymentNotFound(t *testing.T) { - type args struct { - err error - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "When the error is empty, it returns false", - }, - { - name: "When the error is something else (500), it returns false", - args: args{ - err: &apierror.Error{Err: &runtime.APIError{Code: 500}}, - }, - }, - { - name: "When the error is something else (401), it returns false", - args: args{ - err: &apierror.Error{Err: &deployments.GetDeploymentUnauthorized{}}, - }, - }, - { - name: "When the deployment is not found, it returns true", - args: args{ - err: &apierror.Error{Err: 
&deployments.GetDeploymentNotFound{}}, - }, - want: true, - }, - { - name: "When the deployment is not authorized it returns true, to account for the DR case (ESS)", - args: args{ - err: &apierror.Error{Err: &runtime.APIError{Code: 403}}, - }, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := deploymentNotFound(tt.args.err); got != tt.want { - t.Errorf("deploymentNotFound() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/ec/ecresource/deploymentresource/resource.go b/ec/ecresource/deploymentresource/resource.go index 380ae7217..4624ae9e3 100644 --- a/ec/ecresource/deploymentresource/resource.go +++ b/ec/ecresource/deploymentresource/resource.go @@ -18,39 +18,51 @@ package deploymentresource import ( - "time" + "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" + v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + "github.com/elastic/terraform-provider-ec/ec/internal" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" ) -// Resource returns the ec_deployment resource schema. 
-func Resource() *schema.Resource { - return &schema.Resource{ - CreateContext: createResource, - ReadContext: readResource, - UpdateContext: updateResource, - DeleteContext: deleteResource, - - Schema: newSchema(), - - Description: "Elastic Cloud Deployment resource", - Importer: &schema.ResourceImporter{ - StateContext: importFunc, - }, - - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(40 * time.Minute), - Update: schema.DefaultTimeout(60 * time.Minute), - Delete: schema.DefaultTimeout(60 * time.Minute), - }, - - SchemaVersion: 1, - StateUpgraders: []schema.StateUpgrader{ - { - Type: resourceSchemaV0().CoreConfigSchema().ImpliedType(), - Upgrade: resourceStateUpgradeV0, - Version: 0, - }, - }, +// Ensure provider defined types fully satisfy framework interfaces +// var _ tpfprovider.ResourceType = DeploymentResourceType{} +var _ resource.ResourceWithImportState = &Resource{} + +type Resource struct { + client *api.API +} + +func (r *Resource) ready(dg *diag.Diagnostics) bool { + if r.client == nil { + dg.AddError( + "Unconfigured API Client", + "Expected configured API client. Please report this issue to the provider developers.", + ) + + return false } + return true +} + +func (r *Resource) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) { + client, diags := internal.ConvertProviderData(request.ProviderData) + response.Diagnostics.Append(diags...) 
+ r.client = client +} + +func (t *Resource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { + return v2.DeploymentSchema(), nil +} + +func (r *Resource) Metadata(ctx context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = request.ProviderTypeName + "_deployment" +} + +func (r *Resource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) } diff --git a/ec/ecresource/deploymentresource/schema.go b/ec/ecresource/deploymentresource/schema.go deleted file mode 100644 index 770574618..000000000 --- a/ec/ecresource/deploymentresource/schema.go +++ /dev/null @@ -1,211 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -const ( - minimumKibanaSize = 1024 - minimumApmSize = 512 - minimumEnterpriseSearchSize = 2048 - minimumIntegrationsServerSize = 1024 - - minimumZoneCount = 1 -) - -// newSchema returns the schema for an "ec_deployment" resource. 
-func newSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "alias": { - Type: schema.TypeString, - Description: "Optional deployment alias that affects the format of the resource URLs", - Optional: true, - Computed: true, - }, - "version": { - Type: schema.TypeString, - Description: "Required Elastic Stack version to use for all of the deployment resources", - Required: true, - }, - "region": { - Type: schema.TypeString, - Description: `Required ESS region where to create the deployment, for ECE environments "ece-region" must be set`, - Required: true, - ForceNew: true, - }, - "deployment_template_id": { - Type: schema.TypeString, - Description: "Required Deployment Template identifier to create the deployment from", - Required: true, - }, - "name": { - Type: schema.TypeString, - Description: "Optional name for the deployment", - Optional: true, - }, - "request_id": { - Type: schema.TypeString, - Description: "Optional request_id to set on the create operation, only use when previous create attempts return with an error and a request_id is returned as part of the error", - Optional: true, - }, - - // Computed ES Creds - "elasticsearch_username": { - Type: schema.TypeString, - Description: "Computed username obtained upon creating the Elasticsearch resource", - Computed: true, - }, - "elasticsearch_password": { - Type: schema.TypeString, - Description: "Computed password obtained upon creating the Elasticsearch resource", - Computed: true, - Sensitive: true, - }, - - // APM secret_token - "apm_secret_token": { - Type: schema.TypeString, - Computed: true, - Sensitive: true, - }, - - // Resources - "elasticsearch": { - Type: schema.TypeList, - Description: "Required Elasticsearch resource definition", - MaxItems: 1, - Required: true, - Elem: newElasticsearchResource(), - }, - "kibana": { - Type: schema.TypeList, - Description: "Optional Kibana resource definition", - Optional: true, - MaxItems: 1, - Elem: newKibanaResource(), - }, - "apm": { - 
Type: schema.TypeList, - Description: "Optional APM resource definition", - Optional: true, - MaxItems: 1, - Elem: newApmResource(), - }, - "integrations_server": { - Type: schema.TypeList, - Description: "Optional Integrations Server resource definition", - Optional: true, - MaxItems: 1, - Elem: newIntegrationsServerResource(), - }, - "enterprise_search": { - Type: schema.TypeList, - Description: "Optional Enterprise Search resource definition", - Optional: true, - MaxItems: 1, - Elem: newEnterpriseSearchResource(), - }, - - // Settings - "traffic_filter": { - Description: "Optional list of traffic filters to apply to this deployment.", - // This field is a TypeSet since the order of the items isn't - // important, but the unique list is. This prevents infinite loops - // for autogenerated IDs. - Type: schema.TypeSet, - Set: schema.HashString, - Optional: true, - MinItems: 1, - Elem: &schema.Schema{ - MinItems: 1, - Type: schema.TypeString, - }, - }, - "observability": { - Type: schema.TypeList, - Description: "Optional observability settings. Ship logs and metrics to a dedicated deployment.", - Optional: true, - MaxItems: 1, - Elem: newObservabilitySettings(), - }, - - "tags": { - Description: "Optional map of deployment tags", - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - } -} - -func newObservabilitySettings() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "deployment_id": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool { - // The terraform config can contain 'self' as a deployment target - // However the API will return the actual deployment-id. - // This overrides 'self' with the deployment-id so the diff will work correctly. 
- var deploymentID = d.Id() - var mappedOldValue = mapSelfToDeploymentID(oldValue, deploymentID) - var mappedNewValue = mapSelfToDeploymentID(newValue, deploymentID) - - return mappedOldValue == mappedNewValue - }, - }, - "ref_id": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "logs": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "metrics": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, - } -} - -func mapSelfToDeploymentID(value string, deploymentID string) string { - if value == "self" && deploymentID != "" { - // If the deployment has a deployment-id, replace 'self' with the deployment-id - return deploymentID - } - - return value -} - -// suppressMissingOptionalConfigurationBlock handles configuration block attributes in the following scenario: -// - The resource schema includes an optional configuration block with defaults -// - The API response includes those defaults to refresh into the Terraform state -// - The operator's configuration omits the optional configuration block -func suppressMissingOptionalConfigurationBlock(k, old, new string, d *schema.ResourceData) bool { - return old == "1" && new == "0" -} diff --git a/ec/ecresource/deploymentresource/schema_apm.go b/ec/ecresource/deploymentresource/schema_apm.go deleted file mode 100644 index 0fa90790f..000000000 --- a/ec/ecresource/deploymentresource/schema_apm.go +++ /dev/null @@ -1,141 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func newApmResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "ref_id": { - Type: schema.TypeString, - Default: "main-apm", - Optional: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "topology": apmTopologySchema(), - - "config": apmConfig(), - - // TODO: Implement settings field. 
- // "settings": interface{} - }, - } -} - -func apmTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - }, - }, - } -} - -func apmConfig() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optionally define the Apm configuration options for the APM Server`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "docker_image": { - Type: schema.TypeString, - Description: "Optionally override the docker image the APM nodes will use. Note that this field will only work for internal users only.", - Optional: true, - }, - // APM System Settings - "debug_enabled": { - Type: schema.TypeBool, - Description: `Optionally enable debug mode for APM servers - defaults to false`, - Optional: true, - Default: false, - }, - - "user_settings_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - }, - }, - } -} diff --git a/ec/ecresource/deploymentresource/schema_elasticsearch.go b/ec/ecresource/deploymentresource/schema_elasticsearch.go deleted file mode 100644 index 5bd75ff94..000000000 --- a/ec/ecresource/deploymentresource/schema_elasticsearch.go +++ /dev/null @@ -1,549 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - - "github.com/elastic/cloud-sdk-go/pkg/util/slice" -) - -func newElasticsearchResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "autoscale": { - Type: schema.TypeString, - Description: `Enable or disable autoscaling. Defaults to the setting coming from the deployment template. 
Accepted values are "true" or "false".`, - Computed: true, - Optional: true, - ValidateFunc: func(i interface{}, s string) ([]string, []error) { - if _, err := strconv.ParseBool(i.(string)); err != nil { - return nil, []error{ - fmt.Errorf("failed parsing autoscale value: %w", err), - } - } - return nil, nil - }, - }, - - "ref_id": { - Type: schema.TypeString, - Description: "Optional ref_id to set on the Elasticsearch resource", - Default: "main-elasticsearch", - Optional: true, - }, - - // Computed attributes - "resource_id": { - Type: schema.TypeString, - Description: "The Elasticsearch resource unique identifier", - Computed: true, - }, - "region": { - Type: schema.TypeString, - Description: "The Elasticsearch resource region", - Computed: true, - }, - "cloud_id": { - Type: schema.TypeString, - Description: "The encoded Elasticsearch credentials to use in Beats or Logstash", - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Description: "The Elasticsearch resource HTTP endpoint", - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Description: "The Elasticsearch resource HTTPs endpoint", - Computed: true, - }, - - // Sub-objects - "topology": elasticsearchTopologySchema(), - - "config": elasticsearchConfig(), - - "remote_cluster": elasticsearchRemoteCluster(), - - "snapshot_source": newSnapshotSourceSettings(), - - "extension": newExtensionSchema(), - - "trust_account": newTrustAccountSchema(), - "trust_external": newTrustExternalSchema(), - - "strategy": newStrategySchema(), - }, - } -} - -func elasticsearchTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - MinItems: 1, - Optional: true, - Computed: true, - Description: `Optional topology element which must be set once but can be set multiple times to compose complex topologies`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Description: `Required topology ID from the 
deployment template`, - Required: true, - }, - "instance_configuration_id": { - Type: schema.TypeString, - Description: `Computed Instance Configuration ID of the topology element`, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Description: `Optional amount of memory per node in the "g" notation`, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Description: `Optional number of zones that the Elasticsearch cluster will span. This is used to set HA`, - Computed: true, - Optional: true, - }, - "node_type_data": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (data node)`, - Computed: true, - Optional: true, - }, - "node_type_master": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (master node)`, - Computed: true, - Optional: true, - }, - "node_type_ingest": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (ingest node)`, - Computed: true, - Optional: true, - }, - "node_type_ml": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (machine learning node)`, - Computed: true, - Optional: true, - }, - "node_roles": { - Type: schema.TypeSet, - Set: schema.HashString, - Description: `The computed list of node roles for the current topology element`, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "autoscaling": { - Type: schema.TypeList, - Description: "Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources.", - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_size_resource": { - Description: "Maximum resource type for the maximum 
autoscaling setting.", - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "max_size": { - Description: "Maximum size value for the maximum autoscaling setting.", - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "min_size_resource": { - Description: "Minimum resource type for the minimum autoscaling setting.", - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "min_size": { - Description: "Minimum size value for the minimum autoscaling setting.", - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "policy_override_json": { - Type: schema.TypeString, - Description: "Computed policy overrides set directly via the API or other clients.", - Computed: true, - }, - }, - }, - }, - - // Read only config block that is present in the provider to - // avoid unsetting already set 'topology.elasticsearch' in the - // deployment plan. - "config": { - Type: schema.TypeList, - Computed: true, - Description: `Computed read-only configuration to avoid unsetting plan settings from 'topology.elasticsearch'`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Settings - - // plugins maps to the `enabled_built_in_plugins` API setting. - "plugins": { - Type: schema.TypeSet, - Set: schema.HashString, - Description: "List of Elasticsearch supported plugins, which vary from version to version. Check the Stack Pack version to see which plugins are supported for each version. 
This is currently only available from the UI and [ecctl](https://www.elastic.co/guide/en/ecctl/master/ecctl_stack_list.html)", - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - // User settings - "user_settings_json": { - Type: schema.TypeString, - Description: `JSON-formatted user level "elasticsearch.yml" setting overrides`, - Computed: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `JSON-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, - Computed: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `YAML-formatted user level "elasticsearch.yml" setting overrides`, - Computed: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `YAML-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, - Computed: true, - }, - }, - }, - }, - }, - }, - } -} - -func elasticsearchConfig() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optional Elasticsearch settings which will be applied to all topologies unless overridden on the topology element`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Settings - - "docker_image": { - Type: schema.TypeString, - Description: "Optionally override the docker image the Elasticsearch nodes will use. Note that this field will only work for internal users only.", - Optional: true, - }, - - // Ignored settings are: [ user_bundles and user_plugins ]. - // Adding support for them will allow users to specify - // "Extensions" as it is possible in the UI today. - // The implementation would differ between ECE and ESS. - - // plugins maps to the `enabled_built_in_plugins` API setting. 
- "plugins": { - Type: schema.TypeSet, - Set: schema.HashString, - Description: "List of Elasticsearch supported plugins, which vary from version to version. Check the Stack Pack version to see which plugins are supported for each version. This is currently only available from the UI and [ecctl](https://www.elastic.co/guide/en/ecctl/master/ecctl_stack_list.html)", - Optional: true, - Elem: &schema.Schema{ - MinItems: 1, - Type: schema.TypeString, - }, - }, - - // User settings - "user_settings_json": { - Type: schema.TypeString, - Description: `JSON-formatted user level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `JSON-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `YAML-formatted user level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `YAML-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - }, - }, - } -} - -func elasticsearchRemoteCluster() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Description: "Optional Elasticsearch remote clusters to configure for the Elasticsearch resource, can be set multiple times", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "deployment_id": { - Description: "Remote deployment ID", - Type: schema.TypeString, - ValidateFunc: validation.StringLenBetween(32, 32), - Required: true, - }, - "alias": { - Description: "Alias for this Cross Cluster Search binding", - Type: schema.TypeString, - ValidateFunc: validation.StringIsNotEmpty, - Required: true, - }, - "ref_id": { - Description: `Remote elasticsearch "ref_id", it is best left to the default value`, - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - 
"skip_unavailable": { - Description: "If true, skip the cluster during search when disconnected", - Type: schema.TypeBool, - Default: false, - Optional: true, - }, - }, - }, - } -} - -func newSnapshotSourceSettings() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Description: "Optional snapshot source settings. Restore data from a snapshot of another deployment.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source_elasticsearch_cluster_id": { - Description: "ID of the Elasticsearch cluster that will be used as the source of the snapshot", - Type: schema.TypeString, - Required: true, - }, - "snapshot_name": { - Description: "Name of the snapshot to restore. Use '__latest_success__' to get the most recent successful snapshot.", - Type: schema.TypeString, - Default: "__latest_success__", - Optional: true, - }, - }, - }, - } -} - -func newExtensionSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Set: esExtensionHash, - Description: "Optional Elasticsearch extensions such as custom bundles or plugins.", - Optional: true, - MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Description: "Extension name.", - Type: schema.TypeString, - Required: true, - }, - "type": { - Description: "Extension type, only `bundle` or `plugin` are supported.", - Type: schema.TypeString, - Required: true, - ValidateFunc: func(val interface{}, _ string) ([]string, []error) { - t := val.(string) - if !slice.HasString([]string{"bundle", "plugin"}, t) { - return nil, []error{fmt.Errorf( - "invalid extension type %s: accepted values are bundle or plugin", - t, - )} - } - return nil, nil - }, - }, - "version": { - Description: "Elasticsearch compatibility version. 
Bundles should specify major or minor versions with wildcards, such as `7.*` or `*` but **plugins must use full version notation down to the patch level**, such as `7.10.1` and wildcards are not allowed.", - Type: schema.TypeString, - Required: true, - }, - "url": { - Description: "Bundle or plugin URL, the extension URL can be obtained from the `ec_deployment_extension..url` attribute or the API and cannot be a random HTTP address that is hosted elsewhere.", - Type: schema.TypeString, - Required: true, - }, - }, - }, - } -} - -func esExtensionHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(m["type"].(string)) - buf.WriteString(m["version"].(string)) - buf.WriteString(m["url"].(string)) - buf.WriteString(m["name"].(string)) - return schema.HashString(buf.String()) -} - -func newTrustAccountSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Description: "Optional Elasticsearch account trust settings.", - Optional: true, - Computed: true, - Elem: accountResource(), - } -} - -func accountResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "account_id": { - Description: "The ID of the Account.", - Type: schema.TypeString, - Required: true, - }, - "trust_all": { - Description: "If true, all clusters in this account will by default be trusted and the `trust_allowlist` is ignored.", - Type: schema.TypeBool, - Required: true, - }, - "trust_allowlist": { - Description: "The list of clusters to trust. 
Only used when `trust_all` is false.", - Type: schema.TypeSet, - Set: schema.HashString, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - } -} - -func newTrustExternalSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Description: "Optional Elasticsearch external trust settings.", - Optional: true, - Computed: true, - Elem: externalResource(), - } -} - -func externalResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "relationship_id": { - Description: "The ID of the external trust relationship.", - Type: schema.TypeString, - Required: true, - }, - "trust_all": { - Description: "If true, all clusters in this account will by default be trusted and the `trust_allowlist` is ignored.", - Type: schema.TypeBool, - Required: true, - }, - "trust_allowlist": { - Description: "The list of clusters to trust. Only used when `trust_all` is false.", - Type: schema.TypeSet, - Set: schema.HashString, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - } -} - -func newStrategySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Description: "Configuration strategy settings.", - Optional: true, - MaxItems: 1, - Elem: strategyResource(), - } -} - -func strategyResource() *schema.Resource { - validValues := strings.Join(strategiesList, ", ") - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Description: "Configuration strategy type " + validValues, - Type: schema.TypeString, - Required: true, - ValidateFunc: func(val interface{}, key string) (warns []string, errs []error) { - t := val.(string) - fmt.Printf("Validating %s in %v", t, validValues) - if !slice.HasString(strategiesList, t) { - errs = append(errs, fmt.Errorf(`invalid %s '%s': valid strategies are %v`, key, t, validValues)) - } - return - }, - // changes on this setting do not change the plan. 
- DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - return true - }, - }, - }, - } -} diff --git a/ec/ecresource/deploymentresource/schema_enteprise_search.go b/ec/ecresource/deploymentresource/schema_enteprise_search.go deleted file mode 100644 index 8bca176eb..000000000 --- a/ec/ecresource/deploymentresource/schema_enteprise_search.go +++ /dev/null @@ -1,148 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func newEnterpriseSearchResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "ref_id": { - Type: schema.TypeString, - Default: "main-enterprise_search", - Optional: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "topology": enterpriseSearchTopologySchema(), - - "config": enterpriseSearchConfig(), - - // TODO: Implement settings field. - // "settings": interface{} - }, - } -} - -func enterpriseSearchTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - - // Node types - - "node_type_appserver": { - Type: schema.TypeBool, - Computed: true, - }, - "node_type_connector": { - Type: schema.TypeBool, - Computed: true, - }, - "node_type_worker": { - Type: schema.TypeBool, - Computed: true, - }, - }, - }, - } -} - -func enterpriseSearchConfig() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optionally 
define the Enterprise Search configuration options for the Enterprise Search Server`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "docker_image": { - Type: schema.TypeString, - Description: "Optionally override the docker image the Enterprise Search nodes will use. Note that this field will only work for internal users only.", - Optional: true, - }, - "user_settings_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - }, - }, - } -} diff --git a/ec/ecresource/deploymentresource/schema_integrations_server.go b/ec/ecresource/deploymentresource/schema_integrations_server.go deleted file mode 100644 index a27c43da9..000000000 --- a/ec/ecresource/deploymentresource/schema_integrations_server.go +++ /dev/null @@ -1,140 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func newIntegrationsServerResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "ref_id": { - Type: schema.TypeString, - Default: "main-integrations_server", - Optional: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "topology": IntegrationsServerTopologySchema(), - - "config": IntegrationsServerConfig(), - }, - } -} - -// IntegrationsServerTopologySchema is ... -func IntegrationsServerTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - }, - }, - } -} - -// IntegrationsServerConfig returns the schema for an integrations server. 
-func IntegrationsServerConfig() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optionally define the IntegrationsServer configuration options for the IntegrationsServer Server`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "docker_image": { - Type: schema.TypeString, - Description: "Optionally override the docker image the IntegrationsServer nodes will use. Note that this field will only work for internal users only.", - Optional: true, - }, - // IntegrationsServer System Settings - "debug_enabled": { - Type: schema.TypeBool, - Description: `Optionally enable debug mode for IntegrationsServer servers - defaults to false`, - Optional: true, - Default: false, - }, - - "user_settings_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - }, - }, - } -} diff --git a/ec/ecresource/deploymentresource/schema_kibana.go b/ec/ecresource/deploymentresource/schema_kibana.go deleted file mode 100644 index 68ef516d9..000000000 --- a/ec/ecresource/deploymentresource/schema_kibana.go +++ /dev/null @@ -1,130 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func newKibanaResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "ref_id": { - Type: schema.TypeString, - Default: "main-kibana", - Optional: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "topology": kibanaTopologySchema(), - - "config": kibanaConfig(), - }, - } -} - -func kibanaTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - }, - }, - } -} - -func kibanaConfig() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optionally define the Kibana configuration options for the Kibana Server`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "docker_image": { - Type: schema.TypeString, - Description: "Optionally override the docker image the Kibana nodes will use. 
Note that this field will only work for internal users only.", - Optional: true, - }, - "user_settings_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - }, - }, - } -} diff --git a/ec/ecresource/deploymentresource/schema_v0.go b/ec/ecresource/deploymentresource/schema_v0.go deleted file mode 100644 index 0ecbb9080..000000000 --- a/ec/ecresource/deploymentresource/schema_v0.go +++ /dev/null @@ -1,704 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "context" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func resourceStateUpgradeV0(_ context.Context, raw map[string]interface{}, m interface{}) (map[string]interface{}, error) { - for _, apm := range raw["apm"].([]interface{}) { - rawApm := apm.(map[string]interface{}) - delete(rawApm, "version") - } - - for _, es := range raw["elasticsearch"].([]interface{}) { - rawEs := es.(map[string]interface{}) - delete(rawEs, "version") - } - - for _, ess := range raw["enterprise_search"].([]interface{}) { - rawEss := ess.(map[string]interface{}) - delete(rawEss, "version") - } - - for _, kibana := range raw["kibana"].([]interface{}) { - rawKibana := kibana.(map[string]interface{}) - delete(rawKibana, "version") - } - - return raw, nil -} - -// Copy of the revision 0 of the deployment schema. -func resourceSchemaV0() *schema.Resource { - return &schema.Resource{Schema: map[string]*schema.Schema{ - "version": { - Type: schema.TypeString, - Description: "Required Elastic Stack version to use for all of the deployment resources", - Required: true, - }, - "region": { - Type: schema.TypeString, - Description: `Required ESS region where to create the deployment, for ECE environments "ece-region" must be set`, - Required: true, - ForceNew: true, - }, - "deployment_template_id": { - Type: schema.TypeString, - Description: "Required Deployment Template identifier to create the deployment from", - Required: true, - }, - "name": { - Type: schema.TypeString, - Description: "Optional name for the deployment", - Optional: true, - }, - "request_id": { - Type: schema.TypeString, - Description: "Optional request_id to set on the create operation, only use when previous create attempts return with an error and a request_id is returned as part of the error", - Optional: true, - }, - - // Computed ES Creds - "elasticsearch_username": { - Type: 
schema.TypeString, - Description: "Computed username obtained upon creating the Elasticsearch resource", - Computed: true, - }, - "elasticsearch_password": { - Type: schema.TypeString, - Description: "Computed password obtained upon creating the Elasticsearch resource", - Computed: true, - Sensitive: true, - }, - - // APM secret_token - "apm_secret_token": { - Type: schema.TypeString, - Computed: true, - Sensitive: true, - }, - - // Resources - "elasticsearch": { - Type: schema.TypeList, - Description: "Required Elasticsearch resource definition", - MaxItems: 1, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ref_id": { - Type: schema.TypeString, - Description: "Optional ref_id to set on the Elasticsearch resource", - Default: "main-elasticsearch", - Optional: true, - }, - - // Computed attributes - "resource_id": { - Type: schema.TypeString, - Description: "The Elasticsearch resource unique identifier", - Computed: true, - }, - "version": { - Type: schema.TypeString, - Description: "The Elasticsearch resource current version", - Computed: true, - }, - "region": { - Type: schema.TypeString, - Description: "The Elasticsearch resource region", - Computed: true, - }, - "cloud_id": { - Type: schema.TypeString, - Description: "The encoded Elasticsearch credentials to use in Beats or Logstash", - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Description: "The Elasticsearch resource HTTP endpoint", - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Description: "The Elasticsearch resource HTTPs endpoint", - Computed: true, - }, - - // Sub-objects - "topology": { - Type: schema.TypeList, - MinItems: 1, - Optional: true, - Computed: true, - Description: `Optional topology element which must be set once but can be set multiple times to compose complex topologies`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - 
Description: `Computed Instance Configuration ID of the topology element`, - Computed: true, - Optional: true, - }, - "size": { - Type: schema.TypeString, - Description: `Optional amount of memory per node in the "g" notation`, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Description: `Optional number of zones that the Elasticsearch cluster will span. This is used to set HA`, - Computed: true, - Optional: true, - }, - "node_type_data": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (data node)`, - Computed: true, - Optional: true, - }, - "node_type_master": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (master node)`, - Computed: true, - Optional: true, - }, - "node_type_ingest": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (ingest node)`, - Computed: true, - Optional: true, - }, - "node_type_ml": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (machine learning node)`, - Computed: true, - Optional: true, - }, - }, - }, - }, - - "config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optional Elasticsearch settings which will be applied to all topologies unless overridden on the topology element`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Settings - - // Ignored settings are: [ user_bundles and user_plugins ]. - // Adding support for them will allow users to specify - // "Extensions" as it is possible in the UI today. - // The implementation would differ between ECE and ESS. - - // plugins maps to the `enabled_built_in_plugins` API setting. 
- "plugins": { - Type: schema.TypeSet, - Set: schema.HashString, - Description: "List of Elasticsearch supported plugins, which vary from version to version. Check the Stack Pack version to see which plugins are supported for each version. This is currently only available from the UI and [ecctl](https://www.elastic.co/guide/en/ecctl/master/ecctl_stack_list.html)", - Optional: true, - MinItems: 1, - Elem: &schema.Schema{ - MinItems: 1, - Type: schema.TypeString, - }, - }, - - // User settings - "user_settings_json": { - Type: schema.TypeString, - Description: `JSON-formatted user level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `JSON-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `YAML-formatted user level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `YAML-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - }, - }, - }, - - "remote_cluster": { - Type: schema.TypeList, - Optional: true, - MinItems: 1, - Description: "Optional Elasticsearch remote clusters to configure for the Elasticsearch resource, can be set multiple times", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "deployment_id": { - Description: "Remote deployment ID", - Type: schema.TypeString, - ValidateFunc: validation.StringLenBetween(32, 32), - Required: true, - }, - "alias": { - Description: "Alias for this Cross Cluster Search binding", - Type: schema.TypeString, - ValidateFunc: validation.StringIsNotEmpty, - Optional: true, - }, - "ref_id": { - Description: `Remote elasticsearch "ref_id", it is best left to the default value`, - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "skip_unavailable": { - 
Description: "If true, skip the cluster during search when disconnected", - Type: schema.TypeBool, - Default: false, - Optional: true, - }, - }, - }, - }, - - "snapshot_source": { - Type: schema.TypeList, - Description: "Optional snapshot source settings. Restore data from a snapshot of another deployment.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source_elasticsearch_cluster_id": { - Description: "ID of the Elasticsearch cluster that will be used as the source of the snapshot", - Type: schema.TypeString, - Required: true, - }, - "snapshot_name": { - Description: "Name of the snapshot to restore. Use '__latest_success__' to get the most recent successful snapshot.", - Type: schema.TypeString, - Default: "__latest_success__", - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "kibana": { - Type: schema.TypeList, - Description: "Optional Kibana resource definition", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "ref_id": { - Type: schema.TypeString, - Default: "main-kibana", - Optional: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "version": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "topology": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults 
to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - }, - }, - }, - - "config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optionally define the Kibana configuration options for the Kibana Server`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "user_settings_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "apm": { - Type: schema.TypeList, - Description: "Optional APM resource definition", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "ref_id": { - Type: schema.TypeString, - Default: "main-apm", - Optional: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "version": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "topology": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - }, - }, - }, - - "config": { - Type: schema.TypeList, - Optional: 
true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optionally define the Apm configuration options for the APM Server`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // APM System Settings - "debug_enabled": { - Type: schema.TypeBool, - Description: `Optionally enable debug mode for APM servers - defaults to false`, - Optional: true, - Default: false, - }, - - "user_settings_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "enterprise_search": { - Type: schema.TypeList, - Description: "Optional Enterprise Search resource definition", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "ref_id": { - Type: schema.TypeString, - Default: "main-enterprise_search", - Optional: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "version": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "topology": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - - // Node types - - 
"node_type_appserver": { - Type: schema.TypeBool, - Computed: true, - }, - "node_type_connector": { - Type: schema.TypeBool, - Computed: true, - }, - "node_type_worker": { - Type: schema.TypeBool, - Computed: true, - }, - }, - }, - }, - - "config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optionally define the Enterprise Search configuration options for the Enterprise Search Server`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "user_settings_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - - // Settings - "traffic_filter": { - Description: "Optional list of traffic filters to apply to this deployment.", - // This field is a TypeSet since the order of the items isn't - // important, but the unique list is. This prevents infinite loops - // for autogenerated IDs. - Type: schema.TypeSet, - Set: schema.HashString, - Optional: true, - MinItems: 1, - Elem: &schema.Schema{ - MinItems: 1, - Type: schema.TypeString, - }, - }, - "observability": { - Type: schema.TypeList, - Description: "Optional observability settings. 
Ship logs and metrics to a dedicated deployment.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "deployment_id": { - Type: schema.TypeString, - Required: true, - }, - "ref_id": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "logs": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "metrics": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, - }, - }, - "tags": { - Description: "Optional map of deployment tags", - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }} -} diff --git a/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-create-expected-payload.json b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-create-expected-payload.json new file mode 100644 index 000000000..2e7fcbc29 --- /dev/null +++ b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-create-expected-payload.json @@ -0,0 +1,214 @@ +{ + "metadata": { + "tags": [] + }, + "name": "my_deployment_name", + "resources": { + "apm": null, + "appsearch": null, + "elasticsearch": [ + { + "plan": { + "autoscaling_enabled": false, + "cluster_topology": [ + { + "id": "coordinating", + "instance_configuration_id": "aws.coordinating.m5d", + "node_roles": [ + "ingest", + "remote_cluster_client" + ], + "size": { + "resource": "memory", + "value": 0 + }, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "zone_count": 2 + }, + { + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "elasticsearch": { + "node_attributes": { + "data": "hot" + } + }, + "id": "hot_content", + "instance_configuration_id": "aws.data.highio.i3", + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "size": { + "resource": "memory", + "value": 8192 + }, + 
"topology_element_control": { + "min": { + "resource": "memory", + "value": 1024 + } + }, + "zone_count": 2 + }, + { + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "elasticsearch": { + "node_attributes": { + "data": "warm" + } + }, + "id": "warm", + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_warm", + "remote_cluster_client" + ], + "size": { + "resource": "memory", + "value": 0 + }, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "zone_count": 2 + }, + { + "autoscaling_max": { + "resource": "memory", + "value": 59392 + }, + "elasticsearch": { + "node_attributes": { + "data": "cold" + } + }, + "id": "cold", + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_cold", + "remote_cluster_client" + ], + "size": { + "resource": "memory", + "value": 0 + }, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "zone_count": 1 + }, + { + "autoscaling_max": { + "resource": "memory", + "value": 122880 + }, + "elasticsearch": { + "node_attributes": { + "data": "frozen" + } + }, + "id": "frozen", + "instance_configuration_id": "aws.es.datafrozen.i3en", + "node_roles": [ + "data_frozen" + ], + "size": { + "resource": "memory", + "value": 0 + }, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "zone_count": 1 + }, + { + "id": "master", + "instance_configuration_id": "aws.master.r5d", + "node_roles": [ + "master", + "remote_cluster_client" + ], + "size": { + "resource": "memory", + "value": 0 + }, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "zone_count": 3 + }, + { + "autoscaling_max": { + "resource": "memory", + "value": 61440 + }, + "autoscaling_min": { + "resource": "memory", + "value": 0 + }, + "id": "ml", + "instance_configuration_id": "aws.ml.m5d", + "node_roles": [ + "ml", + "remote_cluster_client" + ], + "size": { + 
"resource": "memory", + "value": 0 + }, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "zone_count": 1 + } + ], + "deployment_template": { + "id": "aws-io-optimized-v2" + }, + "elasticsearch": { + "version": "8.4.3" + } + }, + "ref_id": "main-elasticsearch", + "region": "us-east-1", + "settings": { + "dedicated_masters_threshold": 6 + } + } + ], + "enterprise_search": null, + "integrations_server": null, + "kibana": null + }, + "settings": {} + } diff --git a/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment1.json b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment1.json new file mode 100644 index 000000000..cfd220d12 --- /dev/null +++ b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment1.json @@ -0,0 +1,767 @@ +{ + "name": "my_deployment_name", + "settings": { + "autoscaling_enabled": false + }, + "healthy": true, + "alias": "my-deployment-name", + "id": "accd2e61fa835a5a32bb6b2938ce91f3", + "resources": { + "enterprise_search": [], + "kibana": [], + "elasticsearch": [ + { + "info": { + "status": "started", + "associated_apm_clusters": [], + "associated_kibana_clusters": [], + "locked": false, + "links": {}, + "associated_enterprise_search_clusters": [], + "healthy": true, + "associated_appsearch_clusters": [], + "region": "us-east-1", + "snapshots": { + "healthy": true, + "count": 0, + "recent_success": false + }, + "cluster_name": "my_deployment_name", + "plan_info": { + "healthy": true, + "current": { + "attempt_end_time": "2022-10-06T09:47:29.673Z", + "warnings": [], + "healthy": true, + "source": { + "action": "deployments.create-deployment", + "date": "2022-10-06T09:45:59.875Z", + "user_id": "111111", + "facilitator": "adminconsole", + "remote_addresses": [ + "18.192.28.203", + "3.88.142.49" + ] + }, + "plan_attempt_log": [ + { + "status": "success", + "started": 
"2022-10-06T09:46:00.619Z", + "duration_in_millis": 17, + "completed": "2022-10-06T09:46:00.636Z", + "step_id": "plan-validator", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.640Z", + "duration_in_millis": 2, + "completed": "2022-10-06T09:46:00.642Z", + "step_id": "log-initial-plan-data", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.647Z", + "duration_in_millis": 3, + "completed": "2022-10-06T09:46:00.650Z", + "step_id": "detect-plan-strategy", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.654Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:46:00.661Z", + "step_id": "calculate-incremental-elasticsearch-change", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.666Z", + "duration_in_millis": 4, + "completed": "2022-10-06T09:46:00.670Z", + "step_id": "resolve-instances-acls", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.673Z", + "duration_in_millis": 4, + "completed": "2022-10-06T09:46:00.677Z", + "step_id": "validate-plan-safety", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.680Z", + "duration_in_millis": 3, + "completed": "2022-10-06T09:46:00.683Z", + "step_id": "validate-elasticsearch-plugin-versions", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.687Z", + "duration_in_millis": 11, + "completed": "2022-10-06T09:46:00.698Z", + "step_id": "ensure-shield-system-key", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.702Z", + "duration_in_millis": 76, + "completed": "2022-10-06T09:46:00.778Z", + "step_id": "ensure-app-auth-tokens", + "info_log": [], + "stage": "completed" + }, + { 
+ "status": "success", + "started": "2022-10-06T09:46:00.783Z", + "duration_in_millis": 1008, + "completed": "2022-10-06T09:46:01.791Z", + "step_id": "add-shield-user", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:01.806Z", + "duration_in_millis": 176, + "completed": "2022-10-06T09:46:01.982Z", + "step_id": "validate-plan-prerequisites", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:01.994Z", + "duration_in_millis": 42, + "completed": "2022-10-06T09:46:02.036Z", + "step_id": "suspend-snapshotting", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.086Z", + "duration_in_millis": 33, + "completed": "2022-10-06T09:46:02.119Z", + "step_id": "ensure-s3-resources", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.132Z", + "duration_in_millis": 18, + "completed": "2022-10-06T09:46:02.150Z", + "step_id": "get-snapshot-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.163Z", + "duration_in_millis": 17, + "completed": "2022-10-06T09:46:02.180Z", + "step_id": "check-enterprise-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.209Z", + "duration_in_millis": 36, + "completed": "2022-10-06T09:46:02.245Z", + "step_id": "create-elasticsearch-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.309Z", + "duration_in_millis": 239, + "completed": "2022-10-06T09:46:02.548Z", + "step_id": "generate-node-certificates", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.552Z", + "duration_in_millis": 174, + "completed": "2022-10-06T09:46:02.726Z", + "step_id": "allocate-instances", + "info_log": [], + 
"stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.731Z", + "duration_in_millis": 4, + "completed": "2022-10-06T09:46:02.735Z", + "step_id": "override-instance-data", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.742Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:46:02.749Z", + "step_id": "update-initial-master-nodes", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.753Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:46:02.761Z", + "step_id": "seed-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.764Z", + "duration_in_millis": 13, + "completed": "2022-10-06T09:46:02.777Z", + "step_id": "start-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.780Z", + "duration_in_millis": 72645, + "completed": "2022-10-06T09:47:15.425Z", + "step_id": "wait-until-running", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:15.436Z", + "duration_in_millis": 30, + "completed": "2022-10-06T09:47:15.466Z", + "step_id": "wait-until-masters-elected", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:15.476Z", + "duration_in_millis": 25, + "completed": "2022-10-06T09:47:15.501Z", + "step_id": "verify-non-split", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:20.585Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:47:20.593Z", + "step_id": "set-maintenance", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:27.002Z", + "duration_in_millis": 119, + "completed": "2022-10-06T09:47:27.121Z", + "step_id": "apply-cluster-license", + "info_log": [], + 
"stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:27.139Z", + "duration_in_millis": 1438, + "completed": "2022-10-06T09:47:28.577Z", + "step_id": "ensure-repository", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.588Z", + "duration_in_millis": 256, + "completed": "2022-10-06T09:47:28.844Z", + "step_id": "ensure-slm-policy", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.849Z", + "duration_in_millis": 41, + "completed": "2022-10-06T09:47:28.890Z", + "step_id": "apply-cluster-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.893Z", + "duration_in_millis": 5, + "completed": "2022-10-06T09:47:28.898Z", + "step_id": "apply-monitoring-config", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.902Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:47:28.910Z", + "step_id": "set-maintenance", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.915Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:47:28.922Z", + "step_id": "apply-curation-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.927Z", + "duration_in_millis": 252, + "completed": "2022-10-06T09:47:29.179Z", + "step_id": "apply-plan-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.379Z", + "duration_in_millis": 46, + "completed": "2022-10-06T09:47:29.425Z", + "step_id": "post-plan-cleanup", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.430Z", + "duration_in_millis": 19, + "completed": "2022-10-06T09:47:29.449Z", + "step_id": "clean-up", + "info_log": [], + "stage": "completed" 
+ }, + { + "status": "success", + "started": "2022-10-06T09:47:29.673Z", + "duration_in_millis": 0, + "completed": "2022-10-06T09:47:29.673Z", + "step_id": "plan-completed", + "info_log": [], + "stage": "completed" + } + ], + "plan": { + "autoscaling_enabled": false, + "cluster_topology": [ + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "instance_configuration_id": "aws.coordinating.m5d", + "node_roles": [ + "ingest", + "remote_cluster_client" + ], + "id": "coordinating", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 1024 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "hot" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highio.i3", + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "id": "hot_content", + "size": { + "resource": "memory", + "value": 8192 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "warm" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_warm", + "remote_cluster_client" + ], + "id": "warm", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "cold" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 59392 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_cold", + "remote_cluster_client" + ], + "id": "cold", + "size": { + "resource": "memory", + "value": 0 
+ } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "frozen" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 122880 + }, + "instance_configuration_id": "aws.es.datafrozen.i3en", + "node_roles": [ + "data_frozen" + ], + "id": "frozen", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 3, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "instance_configuration_id": "aws.master.r5d", + "node_roles": [ + "master", + "remote_cluster_client" + ], + "id": "master", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "autoscaling_min": { + "resource": "memory", + "value": 0 + }, + "autoscaling_max": { + "resource": "memory", + "value": 61440 + }, + "instance_configuration_id": "aws.ml.m5d", + "node_roles": [ + "ml", + "remote_cluster_client" + ], + "id": "ml", + "size": { + "resource": "memory", + "value": 0 + } + } + ], + "elasticsearch": { + "version": "8.4.3" + }, + "deployment_template": { + "id": "aws-io-optimized-v2" + } + }, + "plan_attempt_id": "c656a76c-0567-4efc-84e0-ee317912a41e", + "attempt_start_time": "2022-10-06T09:46:00.619Z" + }, + "history": [] + }, + "cluster_id": "0589ddb3acee4641b95833022bf04d2b", + "external_links": [], + "elasticsearch": { + "healthy": true, + "cluster_blocking_issues": { + "healthy": true, + "blocks": [] + }, + "master_info": { + "healthy": true, + "instances_with_no_master": [], + "masters": [ + { + "instances": [ + "tiebreaker-0000000002", + "instance-0000000001", + "instance-0000000000" + ], + "master_node_id": "U3kdKRNCQ3ym06KKyojljQ", + "master_instance_name": "instance-0000000001" + } + ] + }, + "shards_status": { + "status": "green" + }, + "blocking_issues": { + "healthy": true, + "cluster_level": 
[], + "index_level": [] + }, + "shard_info": { + "healthy": true, + "unavailable_shards": [], + "unavailable_replicas": [], + "available_shards": [] + } + }, + "deployment_id": "accd2e61fa835a5a32bb6b2938ce91f3", + "topology": { + "healthy": true, + "instances": [ + { + "service_roles": [ + "ingest", + "master", + "data" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1b", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "instance-0000000000", + "logical_zone": "zone-0", + "instance_configuration": { + "resource": "memory", + "id": "aws.data.highio.i3", + "name": "aws.data.highio.i3" + }, + "memory": { + "instance_capacity": 8192, + "memory_pressure": 1 + }, + "disk": { + "disk_space_available": 245760, + "storage_multiplier": 30.0, + "disk_space_used": 117 + }, + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "allocator_id": "i-03b043eb9cee5566b", + "service_running": true + }, + { + "service_roles": [ + "ingest", + "master", + "data" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1e", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "instance-0000000001", + "logical_zone": "zone-1", + "instance_configuration": { + "resource": "memory", + "id": "aws.data.highio.i3", + "name": "aws.data.highio.i3" + }, + "memory": { + "instance_capacity": 8192, + "native_memory_pressure": 55 + }, + "disk": { + "disk_space_available": 245760, + "storage_multiplier": 30.0, + "disk_space_used": 0 + }, + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "allocator_id": "i-0af729d3a795a93a3", + "service_running": true + }, + { + "service_roles": [ + "master" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1a", + "container_started": true, + "service_version": 
"8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "tiebreaker-0000000002", + "logical_zone": "tiebreaker", + "instance_configuration": { + "resource": "memory", + "id": "aws.master.r5d", + "name": "aws.master.r5d" + }, + "memory": { + "instance_capacity": 1024, + "native_memory_pressure": 79 + }, + "disk": { + "disk_space_available": 2048, + "storage_multiplier": 2.0, + "disk_space_used": 0 + }, + "node_roles": [ + "master", + "voting_only" + ], + "allocator_id": "i-04712f4bbc8e7072e", + "service_running": true + } + ] + }, + "metadata": { + "endpoint": "0589ddb3acee4641b95833022bf04d2b.us-east-1.aws.found.io", + "sso_deep_linking_supported": false, + "last_modified": "2022-10-06T09:47:29.809Z", + "aliased_endpoint": "my-deployment-name.es.us-east-1.aws.found.io", + "ccr": true, + "version": 20, + "service_url": "https://0589ddb3acee4641b95833022bf04d2b.us-east-1.aws.found.io", + "aliased_url": "https://my-deployment-name.es.us-east-1.aws.found.io", + "ports": { + "transport_passthrough": 9400, + "http": 9200, + "https": 443 + }, + "cloud_id": "my_deployment_name:someCloudID" + } + }, + "region": "us-east-1", + "id": "0589ddb3acee4641b95833022bf04d2b", + "ref_id": "main-elasticsearch" + } + ], + "apm": [], + "appsearch": [], + "integrations_server": [] + }, + "metadata": { + "last_resource_plan_modified": "2022-10-06T09:47:29.673Z", + "tags": [], + "organization_id": "222222", + "last_modified": "2022-10-06T09:47:29.809Z", + "hidden": false, + "system_owned": false, + "owner_id": "111111" + } +} diff --git a/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment2.json b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment2.json new file mode 100644 index 000000000..2956cff32 --- /dev/null +++ b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment2.json @@ -0,0 +1,1313 @@ +{ + "name": "my_deployment_name", + 
"settings": { + "autoscaling_enabled": false + }, + "healthy": true, + "alias": "my-deployment-name", + "id": "accd2e61fa835a5a32bb6b2938ce91f3", + "resources": { + "enterprise_search": [], + "kibana": [], + "elasticsearch": [ + { + "info": { + "status": "started", + "associated_apm_clusters": [], + "associated_kibana_clusters": [], + "locked": false, + "links": {}, + "associated_enterprise_search_clusters": [], + "healthy": true, + "associated_appsearch_clusters": [], + "region": "us-east-1", + "snapshots": { + "healthy": true, + "count": 0, + "recent_success": false + }, + "cluster_name": "my_deployment_name", + "plan_info": { + "healthy": true, + "current": { + "attempt_end_time": "2022-10-06T09:47:29.673Z", + "warnings": [], + "healthy": true, + "source": { + "action": "deployments.create-deployment", + "date": "2022-10-06T09:45:59.875Z", + "user_id": "111111", + "facilitator": "adminconsole", + "remote_addresses": [ + "18.192.28.203", + "3.88.142.49" + ] + }, + "plan_attempt_log": [ + { + "status": "success", + "started": "2022-10-06T09:46:00.619Z", + "duration_in_millis": 17, + "completed": "2022-10-06T09:46:00.636Z", + "step_id": "plan-validator", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.640Z", + "duration_in_millis": 2, + "completed": "2022-10-06T09:46:00.642Z", + "step_id": "log-initial-plan-data", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.647Z", + "duration_in_millis": 3, + "completed": "2022-10-06T09:46:00.650Z", + "step_id": "detect-plan-strategy", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.654Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:46:00.661Z", + "step_id": "calculate-incremental-elasticsearch-change", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.666Z", + "duration_in_millis": 4, 
+ "completed": "2022-10-06T09:46:00.670Z", + "step_id": "resolve-instances-acls", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.673Z", + "duration_in_millis": 4, + "completed": "2022-10-06T09:46:00.677Z", + "step_id": "validate-plan-safety", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.680Z", + "duration_in_millis": 3, + "completed": "2022-10-06T09:46:00.683Z", + "step_id": "validate-elasticsearch-plugin-versions", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.687Z", + "duration_in_millis": 11, + "completed": "2022-10-06T09:46:00.698Z", + "step_id": "ensure-shield-system-key", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.702Z", + "duration_in_millis": 76, + "completed": "2022-10-06T09:46:00.778Z", + "step_id": "ensure-app-auth-tokens", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.783Z", + "duration_in_millis": 1008, + "completed": "2022-10-06T09:46:01.791Z", + "step_id": "add-shield-user", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:01.806Z", + "duration_in_millis": 176, + "completed": "2022-10-06T09:46:01.982Z", + "step_id": "validate-plan-prerequisites", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:01.994Z", + "duration_in_millis": 42, + "completed": "2022-10-06T09:46:02.036Z", + "step_id": "suspend-snapshotting", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.086Z", + "duration_in_millis": 33, + "completed": "2022-10-06T09:46:02.119Z", + "step_id": "ensure-s3-resources", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.132Z", + 
"duration_in_millis": 18, + "completed": "2022-10-06T09:46:02.150Z", + "step_id": "get-snapshot-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.163Z", + "duration_in_millis": 17, + "completed": "2022-10-06T09:46:02.180Z", + "step_id": "check-enterprise-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.209Z", + "duration_in_millis": 36, + "completed": "2022-10-06T09:46:02.245Z", + "step_id": "create-elasticsearch-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.309Z", + "duration_in_millis": 239, + "completed": "2022-10-06T09:46:02.548Z", + "step_id": "generate-node-certificates", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.552Z", + "duration_in_millis": 174, + "completed": "2022-10-06T09:46:02.726Z", + "step_id": "allocate-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.731Z", + "duration_in_millis": 4, + "completed": "2022-10-06T09:46:02.735Z", + "step_id": "override-instance-data", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.742Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:46:02.749Z", + "step_id": "update-initial-master-nodes", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.753Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:46:02.761Z", + "step_id": "seed-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.764Z", + "duration_in_millis": 13, + "completed": "2022-10-06T09:46:02.777Z", + "step_id": "start-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": 
"2022-10-06T09:46:02.780Z", + "duration_in_millis": 72645, + "completed": "2022-10-06T09:47:15.425Z", + "step_id": "wait-until-running", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:15.436Z", + "duration_in_millis": 30, + "completed": "2022-10-06T09:47:15.466Z", + "step_id": "wait-until-masters-elected", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:15.476Z", + "duration_in_millis": 25, + "completed": "2022-10-06T09:47:15.501Z", + "step_id": "verify-non-split", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:20.585Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:47:20.593Z", + "step_id": "set-maintenance", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:27.002Z", + "duration_in_millis": 119, + "completed": "2022-10-06T09:47:27.121Z", + "step_id": "apply-cluster-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:27.139Z", + "duration_in_millis": 1438, + "completed": "2022-10-06T09:47:28.577Z", + "step_id": "ensure-repository", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.588Z", + "duration_in_millis": 256, + "completed": "2022-10-06T09:47:28.844Z", + "step_id": "ensure-slm-policy", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.849Z", + "duration_in_millis": 41, + "completed": "2022-10-06T09:47:28.890Z", + "step_id": "apply-cluster-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.893Z", + "duration_in_millis": 5, + "completed": "2022-10-06T09:47:28.898Z", + "step_id": "apply-monitoring-config", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": 
"2022-10-06T09:47:28.902Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:47:28.910Z", + "step_id": "set-maintenance", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.915Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:47:28.922Z", + "step_id": "apply-curation-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.927Z", + "duration_in_millis": 252, + "completed": "2022-10-06T09:47:29.179Z", + "step_id": "apply-plan-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.379Z", + "duration_in_millis": 46, + "completed": "2022-10-06T09:47:29.425Z", + "step_id": "post-plan-cleanup", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.430Z", + "duration_in_millis": 19, + "completed": "2022-10-06T09:47:29.449Z", + "step_id": "clean-up", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.673Z", + "duration_in_millis": 0, + "completed": "2022-10-06T09:47:29.673Z", + "step_id": "plan-completed", + "info_log": [], + "stage": "completed" + } + ], + "plan": { + "autoscaling_enabled": false, + "cluster_topology": [ + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "instance_configuration_id": "aws.coordinating.m5d", + "node_roles": [ + "ingest", + "remote_cluster_client" + ], + "id": "coordinating", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 1024 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "hot" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highio.i3", + "node_roles": [ + "master", + "ingest", + 
"transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "id": "hot_content", + "size": { + "resource": "memory", + "value": 8192 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "warm" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_warm", + "remote_cluster_client" + ], + "id": "warm", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "cold" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 59392 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_cold", + "remote_cluster_client" + ], + "id": "cold", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "frozen" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 122880 + }, + "instance_configuration_id": "aws.es.datafrozen.i3en", + "node_roles": [ + "data_frozen" + ], + "id": "frozen", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 3, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "instance_configuration_id": "aws.master.r5d", + "node_roles": [ + "master", + "remote_cluster_client" + ], + "id": "master", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "autoscaling_min": { + "resource": "memory", + "value": 0 + }, + "autoscaling_max": { + "resource": 
"memory", + "value": 61440 + }, + "instance_configuration_id": "aws.ml.m5d", + "node_roles": [ + "ml", + "remote_cluster_client" + ], + "id": "ml", + "size": { + "resource": "memory", + "value": 0 + } + } + ], + "elasticsearch": { + "version": "8.4.3" + }, + "deployment_template": { + "id": "aws-io-optimized-v2" + } + }, + "plan_attempt_id": "c656a76c-0567-4efc-84e0-ee317912a41e", + "attempt_start_time": "2022-10-06T09:46:00.619Z" + }, + "history": [ + { + "attempt_end_time": "2022-10-06T09:47:29.673Z", + "plan_attempt_id": "c656a76c-0567-4efc-84e0-ee317912a41e", + "warnings": [], + "healthy": true, + "source": { + "action": "deployments.create-deployment", + "date": "2022-10-06T09:45:59.875Z", + "user_id": "111111", + "facilitator": "adminconsole", + "remote_addresses": [ + "18.192.28.203", + "3.88.142.49" + ] + }, + "plan_attempt_log": [ + { + "status": "success", + "started": "2022-10-06T09:46:00.619Z", + "duration_in_millis": 17, + "completed": "2022-10-06T09:46:00.636Z", + "step_id": "plan-validator", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.640Z", + "duration_in_millis": 2, + "completed": "2022-10-06T09:46:00.642Z", + "step_id": "log-initial-plan-data", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.647Z", + "duration_in_millis": 3, + "completed": "2022-10-06T09:46:00.650Z", + "step_id": "detect-plan-strategy", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.654Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:46:00.661Z", + "step_id": "calculate-incremental-elasticsearch-change", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.666Z", + "duration_in_millis": 4, + "completed": "2022-10-06T09:46:00.670Z", + "step_id": "resolve-instances-acls", + "info_log": [], + "stage": "completed" + }, + { + "status": 
"success", + "started": "2022-10-06T09:46:00.673Z", + "duration_in_millis": 4, + "completed": "2022-10-06T09:46:00.677Z", + "step_id": "validate-plan-safety", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.680Z", + "duration_in_millis": 3, + "completed": "2022-10-06T09:46:00.683Z", + "step_id": "validate-elasticsearch-plugin-versions", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.687Z", + "duration_in_millis": 11, + "completed": "2022-10-06T09:46:00.698Z", + "step_id": "ensure-shield-system-key", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.702Z", + "duration_in_millis": 76, + "completed": "2022-10-06T09:46:00.778Z", + "step_id": "ensure-app-auth-tokens", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.783Z", + "duration_in_millis": 1008, + "completed": "2022-10-06T09:46:01.791Z", + "step_id": "add-shield-user", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:01.806Z", + "duration_in_millis": 176, + "completed": "2022-10-06T09:46:01.982Z", + "step_id": "validate-plan-prerequisites", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:01.994Z", + "duration_in_millis": 42, + "completed": "2022-10-06T09:46:02.036Z", + "step_id": "suspend-snapshotting", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.086Z", + "duration_in_millis": 33, + "completed": "2022-10-06T09:46:02.119Z", + "step_id": "ensure-s3-resources", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.132Z", + "duration_in_millis": 18, + "completed": "2022-10-06T09:46:02.150Z", + "step_id": "get-snapshot-settings", + "info_log": [], + "stage": 
"completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.163Z", + "duration_in_millis": 17, + "completed": "2022-10-06T09:46:02.180Z", + "step_id": "check-enterprise-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.209Z", + "duration_in_millis": 36, + "completed": "2022-10-06T09:46:02.245Z", + "step_id": "create-elasticsearch-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.309Z", + "duration_in_millis": 239, + "completed": "2022-10-06T09:46:02.548Z", + "step_id": "generate-node-certificates", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.552Z", + "duration_in_millis": 174, + "completed": "2022-10-06T09:46:02.726Z", + "step_id": "allocate-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.731Z", + "duration_in_millis": 4, + "completed": "2022-10-06T09:46:02.735Z", + "step_id": "override-instance-data", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.742Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:46:02.749Z", + "step_id": "update-initial-master-nodes", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.753Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:46:02.761Z", + "step_id": "seed-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.764Z", + "duration_in_millis": 13, + "completed": "2022-10-06T09:46:02.777Z", + "step_id": "start-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.780Z", + "duration_in_millis": 72645, + "completed": "2022-10-06T09:47:15.425Z", + "step_id": "wait-until-running", + "info_log": 
[], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:15.436Z", + "duration_in_millis": 30, + "completed": "2022-10-06T09:47:15.466Z", + "step_id": "wait-until-masters-elected", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:15.476Z", + "duration_in_millis": 25, + "completed": "2022-10-06T09:47:15.501Z", + "step_id": "verify-non-split", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:20.585Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:47:20.593Z", + "step_id": "set-maintenance", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:27.002Z", + "duration_in_millis": 119, + "completed": "2022-10-06T09:47:27.121Z", + "step_id": "apply-cluster-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:27.139Z", + "duration_in_millis": 1438, + "completed": "2022-10-06T09:47:28.577Z", + "step_id": "ensure-repository", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.588Z", + "duration_in_millis": 256, + "completed": "2022-10-06T09:47:28.844Z", + "step_id": "ensure-slm-policy", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.849Z", + "duration_in_millis": 41, + "completed": "2022-10-06T09:47:28.890Z", + "step_id": "apply-cluster-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.893Z", + "duration_in_millis": 5, + "completed": "2022-10-06T09:47:28.898Z", + "step_id": "apply-monitoring-config", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.902Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:47:28.910Z", + "step_id": "set-maintenance", + "info_log": [], + 
"stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.915Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:47:28.922Z", + "step_id": "apply-curation-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.927Z", + "duration_in_millis": 252, + "completed": "2022-10-06T09:47:29.179Z", + "step_id": "apply-plan-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.379Z", + "duration_in_millis": 46, + "completed": "2022-10-06T09:47:29.425Z", + "step_id": "post-plan-cleanup", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.430Z", + "duration_in_millis": 19, + "completed": "2022-10-06T09:47:29.449Z", + "step_id": "clean-up", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.673Z", + "duration_in_millis": 0, + "completed": "2022-10-06T09:47:29.673Z", + "step_id": "plan-completed", + "info_log": [], + "stage": "completed" + } + ], + "plan": { + "autoscaling_enabled": false, + "cluster_topology": [ + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "instance_configuration_id": "aws.coordinating.m5d", + "node_roles": [ + "ingest", + "remote_cluster_client" + ], + "id": "coordinating", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 1024 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "hot" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highio.i3", + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "id": "hot_content", + "size": { + "resource": "memory", + "value": 8192 
+ } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "warm" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_warm", + "remote_cluster_client" + ], + "id": "warm", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "cold" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 59392 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_cold", + "remote_cluster_client" + ], + "id": "cold", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "frozen" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 122880 + }, + "instance_configuration_id": "aws.es.datafrozen.i3en", + "node_roles": [ + "data_frozen" + ], + "id": "frozen", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 3, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "instance_configuration_id": "aws.master.r5d", + "node_roles": [ + "master", + "remote_cluster_client" + ], + "id": "master", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "autoscaling_min": { + "resource": "memory", + "value": 0 + }, + "autoscaling_max": { + "resource": "memory", + "value": 61440 + }, + "instance_configuration_id": "aws.ml.m5d", + "node_roles": [ + "ml", + "remote_cluster_client" + ], + "id": "ml", + "size": 
{ + "resource": "memory", + "value": 0 + } + } + ], + "elasticsearch": { + "version": "8.4.3" + }, + "deployment_template": { + "id": "aws-io-optimized-v2" + } + }, + "plan_attempt_name": "attempt-0000000000", + "attempt_start_time": "2022-10-06T09:46:00.636Z" + } + ] + }, + "cluster_id": "0589ddb3acee4641b95833022bf04d2b", + "external_links": [], + "elasticsearch": { + "healthy": true, + "cluster_blocking_issues": { + "healthy": true, + "blocks": [] + }, + "master_info": { + "healthy": true, + "instances_with_no_master": [], + "masters": [ + { + "instances": [ + "tiebreaker-0000000002", + "instance-0000000001", + "instance-0000000000" + ], + "master_node_id": "U3kdKRNCQ3ym06KKyojljQ", + "master_instance_name": "instance-0000000001" + } + ] + }, + "shards_status": { + "status": "green" + }, + "blocking_issues": { + "healthy": true, + "cluster_level": [], + "index_level": [] + }, + "shard_info": { + "healthy": true, + "unavailable_shards": [], + "unavailable_replicas": [], + "available_shards": [] + } + }, + "deployment_id": "accd2e61fa835a5a32bb6b2938ce91f3", + "topology": { + "healthy": true, + "instances": [ + { + "service_roles": [ + "ingest", + "master", + "data" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1b", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "instance-0000000000", + "logical_zone": "zone-0", + "instance_configuration": { + "resource": "memory", + "id": "aws.data.highio.i3", + "name": "aws.data.highio.i3" + }, + "memory": { + "instance_capacity": 8192, + "memory_pressure": 1 + }, + "disk": { + "disk_space_available": 245760, + "storage_multiplier": 30.0, + "disk_space_used": 117 + }, + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "allocator_id": "i-03b043eb9cee5566b", + "service_running": true + }, + { + "service_roles": [ + "ingest", + "master", + "data" + ], + 
"instance_set_id": "hot_content", + "zone": "us-east-1e", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "instance-0000000001", + "logical_zone": "zone-1", + "instance_configuration": { + "resource": "memory", + "id": "aws.data.highio.i3", + "name": "aws.data.highio.i3" + }, + "memory": { + "instance_capacity": 8192, + "native_memory_pressure": 55 + }, + "disk": { + "disk_space_available": 245760, + "storage_multiplier": 30.0, + "disk_space_used": 0 + }, + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "allocator_id": "i-0af729d3a795a93a3", + "service_running": true + }, + { + "service_roles": [ + "master" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1a", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "tiebreaker-0000000002", + "logical_zone": "tiebreaker", + "instance_configuration": { + "resource": "memory", + "id": "aws.master.r5d", + "name": "aws.master.r5d" + }, + "memory": { + "instance_capacity": 1024, + "memory_pressure": 10, + "native_memory_pressure": 79 + }, + "disk": { + "disk_space_available": 2048, + "storage_multiplier": 2.0, + "disk_space_used": 0 + }, + "node_roles": [ + "master", + "voting_only" + ], + "allocator_id": "i-04712f4bbc8e7072e", + "service_running": true + } + ] + }, + "metadata": { + "endpoint": "0589ddb3acee4641b95833022bf04d2b.us-east-1.aws.found.io", + "sso_deep_linking_supported": false, + "last_modified": "2022-10-06T09:47:29.809Z", + "aliased_endpoint": "my-deployment-name.es.us-east-1.aws.found.io", + "ccr": true, + "version": 20, + "service_url": "https://0589ddb3acee4641b95833022bf04d2b.us-east-1.aws.found.io", + "aliased_url": "https://my-deployment-name.es.us-east-1.aws.found.io", + "ports": { + "transport_passthrough": 9400, + "http": 9200, + "https": 443 + }, + "cloud_id": 
"my_deployment_name:someCloudID" + } + }, + "region": "us-east-1", + "id": "0589ddb3acee4641b95833022bf04d2b", + "ref_id": "main-elasticsearch" + } + ], + "apm": [], + "appsearch": [], + "integrations_server": [] + }, + "metadata": { + "last_resource_plan_modified": "2022-10-06T09:47:29.673Z", + "tags": [], + "organization_id": "222222", + "last_modified": "2022-10-06T09:47:29.809Z", + "hidden": false, + "system_owned": false, + "owner_id": "1111" + } +} diff --git a/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json new file mode 100644 index 000000000..846e7f822 --- /dev/null +++ b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json @@ -0,0 +1,669 @@ +{ + "name": "my_deployment_name", + "settings": { + "autoscaling_enabled": false + }, + "healthy": true, + "alias": "my-deployment-name", + "id": "accd2e61fa835a5a32bb6b2938ce91f3", + "resources": { + "enterprise_search": [], + "kibana": [], + "elasticsearch": [ + { + "info": { + "status": "started", + "associated_apm_clusters": [], + "associated_kibana_clusters": [], + "locked": false, + "links": {}, + "associated_enterprise_search_clusters": [], + "settings": { + "trust": { + "accounts": [ + { + "trust_all": true, + "account_id": "222222", + "name": "Default trust for own organization" + } + ] + }, + "curation": { + "specs": [] + }, + "dedicated_masters_threshold": 6, + "snapshot": { + "slm": true, + "enabled": true, + "suspended": [], + "repository": { + "static": { + "repository_type": "s3-resource", + "settings": { + "aws_account": "operations-40-us-east-1", + "region": "us-east-1", + "bucket_name": "edf5c1f724604fe6b4ab7757509400c6", + "client_name": "elastic-internal-0589dd" + } + } + }, + "retention": {} + }, + "metadata": { + "name": "my_deployment_name", + "organization_id": "222222", + "subscription_level": 
"standard", + "hidden": false, + "system_owned": false, + "resources": { + "cpu": { + "boost": true, + "hard_limit": true + } + }, + "owner_id": "111111" + } + }, + "healthy": true, + "associated_appsearch_clusters": [], + "region": "us-east-1", + "snapshots": { + "healthy": true, + "count": 0, + "recent_success": false + }, + "cluster_name": "my_deployment_name", + "plan_info": { + "healthy": true, + "current": { + "attempt_end_time": "2022-10-06T09:47:29.673Z", + "warnings": [], + "healthy": true, + "source": { + "action": "deployments.create-deployment", + "date": "2022-10-06T09:45:59.875Z", + "user_id": "111111", + "facilitator": "adminconsole", + "remote_addresses": [ + "18.192.28.203", + "3.88.142.49" + ] + }, + "plan_attempt_log": [], + "plan": { + "autoscaling_enabled": false, + "cluster_topology": [ + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "system_settings": { + "reindex_whitelist": [], + "http": { + "cors_enabled": false, + "cors_allow_credentials": false, + "cors_max_age": 1728000, + "compression": true + }, + "monitoring_history_duration": "3d", + "monitoring_collection_interval": -1, + "destructive_requires_name": false, + "auto_create_index": true, + "scripting": { + "inline": { + "enabled": true + }, + "stored": { + "enabled": true + } + }, + "enable_close_index": true + } + }, + "instance_configuration_id": "aws.coordinating.m5d", + "node_roles": [ + "ingest", + "remote_cluster_client" + ], + "id": "coordinating", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 1024 + } + }, + "elasticsearch": { + "system_settings": { + "reindex_whitelist": [], + "http": { + "cors_enabled": false, + "cors_allow_credentials": false, + "cors_max_age": 1728000, + "compression": true + }, + "monitoring_history_duration": "3d", + "monitoring_collection_interval": -1, + 
"destructive_requires_name": false, + "auto_create_index": true, + "scripting": { + "inline": { + "enabled": true + }, + "stored": { + "enabled": true + } + }, + "enable_close_index": true + }, + "node_attributes": { + "data": "hot" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highio.i3", + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "id": "hot_content", + "size": { + "resource": "memory", + "value": 8192 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "system_settings": { + "reindex_whitelist": [], + "http": { + "cors_enabled": false, + "cors_allow_credentials": false, + "cors_max_age": 1728000, + "compression": true + }, + "monitoring_history_duration": "3d", + "monitoring_collection_interval": -1, + "destructive_requires_name": false, + "auto_create_index": true, + "scripting": { + "inline": { + "enabled": true + }, + "stored": { + "enabled": true + } + }, + "enable_close_index": true + }, + "node_attributes": { + "data": "warm" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_warm", + "remote_cluster_client" + ], + "id": "warm", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "system_settings": { + "reindex_whitelist": [], + "http": { + "cors_enabled": false, + "cors_allow_credentials": false, + "cors_max_age": 1728000, + "compression": true + }, + "monitoring_history_duration": "3d", + "monitoring_collection_interval": -1, + "destructive_requires_name": false, + "auto_create_index": true, + "scripting": { + "inline": { + "enabled": true + }, + "stored": { + 
"enabled": true + } + }, + "enable_close_index": true + }, + "node_attributes": { + "data": "cold" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 59392 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_cold", + "remote_cluster_client" + ], + "id": "cold", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "system_settings": { + "reindex_whitelist": [], + "http": { + "cors_enabled": false, + "cors_allow_credentials": false, + "cors_max_age": 1728000, + "compression": true + }, + "monitoring_history_duration": "3d", + "monitoring_collection_interval": -1, + "destructive_requires_name": false, + "auto_create_index": true, + "scripting": { + "inline": { + "enabled": true + }, + "stored": { + "enabled": true + } + }, + "enable_close_index": true + }, + "node_attributes": { + "data": "frozen" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 122880 + }, + "instance_configuration_id": "aws.es.datafrozen.i3en", + "node_roles": [ + "data_frozen" + ], + "id": "frozen", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 3, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "system_settings": { + "reindex_whitelist": [], + "http": { + "cors_enabled": false, + "cors_allow_credentials": false, + "cors_max_age": 1728000, + "compression": true + }, + "monitoring_history_duration": "3d", + "monitoring_collection_interval": -1, + "destructive_requires_name": false, + "auto_create_index": true, + "scripting": { + "inline": { + "enabled": true + }, + "stored": { + "enabled": true + } + }, + "enable_close_index": true + } + }, + "instance_configuration_id": "aws.master.r5d", + "node_roles": [ + "master", + "remote_cluster_client" + ], + "id": "master", + "size": { + "resource": 
"memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "autoscaling_min": { + "resource": "memory", + "value": 0 + }, + "elasticsearch": { + "system_settings": { + "reindex_whitelist": [], + "http": { + "cors_enabled": false, + "cors_allow_credentials": false, + "cors_max_age": 1728000, + "compression": true + }, + "monitoring_history_duration": "3d", + "monitoring_collection_interval": -1, + "destructive_requires_name": false, + "auto_create_index": true, + "scripting": { + "inline": { + "enabled": true + }, + "stored": { + "enabled": true + } + }, + "enable_close_index": true + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 61440 + }, + "instance_configuration_id": "aws.ml.m5d", + "node_roles": [ + "ml", + "remote_cluster_client" + ], + "id": "ml", + "size": { + "resource": "memory", + "value": 0 + } + } + ], + "tiebreaker_topology": { + "memory_per_node": 1024 + }, + "transient": { + "plan_configuration": { + "move_allocators": [], + "skip_upgrade_checker": false, + "reallocate_instances": false, + "skip_post_upgrade_steps": false, + "skip_snapshot": false, + "preferred_allocators": [], + "skip_data_migration": false, + "calm_wait_time": 5, + "timeout": 32768, + "override_failsafe": false, + "move_only": false, + "extended_maintenance": false, + "max_snapshot_attempts": 3, + "move_instances": [], + "max_snapshot_age": 300, + "skip_snapshot_post_major_upgrade": false + }, + "strategy": { + "autodetect": {} + } + }, + "elasticsearch": { + "version": "8.4.3" + }, + "deployment_template": { + "id": "aws-io-optimized-v2" + } + }, + "plan_attempt_id": "c656a76c-0567-4efc-84e0-ee317912a41e", + "attempt_start_time": "2022-10-06T09:46:00.619Z" + }, + "history": [] + }, + "cluster_id": "0589ddb3acee4641b95833022bf04d2b", + "external_links": [], + "system_alerts": [], + "elasticsearch": { + "healthy": true, + "cluster_blocking_issues": { + "healthy": true, + 
"blocks": [] + }, + "master_info": { + "healthy": true, + "instances_with_no_master": [], + "masters": [ + { + "instances": [ + "tiebreaker-0000000002", + "instance-0000000001", + "instance-0000000000" + ], + "master_node_id": "U3kdKRNCQ3ym06KKyojljQ", + "master_instance_name": "instance-0000000001" + } + ] + }, + "shards_status": { + "status": "green" + }, + "blocking_issues": { + "healthy": true, + "cluster_level": [], + "index_level": [] + }, + "shard_info": { + "healthy": true, + "unavailable_shards": [], + "unavailable_replicas": [], + "available_shards": [] + } + }, + "deployment_id": "accd2e61fa835a5a32bb6b2938ce91f3", + "topology": { + "healthy": true, + "instances": [ + { + "service_roles": [ + "ingest", + "master", + "data" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1b", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "instance-0000000000", + "logical_zone": "zone-0", + "instance_configuration": { + "resource": "memory", + "id": "aws.data.highio.i3", + "name": "aws.data.highio.i3" + }, + "memory": { + "instance_capacity": 8192, + "memory_pressure": 1 + }, + "disk": { + "disk_space_available": 245760, + "storage_multiplier": 30.0, + "disk_space_used": 117 + }, + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "allocator_id": "i-03b043eb9cee5566b", + "service_running": true + }, + { + "service_roles": [ + "ingest", + "master", + "data" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1e", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "instance-0000000001", + "logical_zone": "zone-1", + "instance_configuration": { + "resource": "memory", + "id": "aws.data.highio.i3", + "name": "aws.data.highio.i3" + }, + "memory": { + "instance_capacity": 8192, + "memory_pressure": 1, + "native_memory_pressure": 55 + }, + 
"disk": { + "disk_space_available": 245760, + "storage_multiplier": 30.0, + "disk_space_used": 117 + }, + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "allocator_id": "i-0af729d3a795a93a3", + "service_running": true + }, + { + "service_roles": [ + "master" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1a", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "tiebreaker-0000000002", + "logical_zone": "tiebreaker", + "instance_configuration": { + "resource": "memory", + "id": "aws.master.r5d", + "name": "aws.master.r5d" + }, + "memory": { + "instance_capacity": 1024, + "memory_pressure": 10, + "native_memory_pressure": 79 + }, + "disk": { + "disk_space_available": 2048, + "storage_multiplier": 2.0, + "disk_space_used": 0 + }, + "node_roles": [ + "master", + "voting_only" + ], + "allocator_id": "i-04712f4bbc8e7072e", + "service_running": true + } + ] + }, + "metadata": { + "endpoint": "0589ddb3acee4641b95833022bf04d2b.us-east-1.aws.found.io", + "sso_deep_linking_supported": false, + "last_modified": "2022-10-06T09:47:29.809Z", + "aliased_endpoint": "my-deployment-name.es.us-east-1.aws.found.io", + "ccr": true, + "version": 20, + "service_url": "https://0589ddb3acee4641b95833022bf04d2b.us-east-1.aws.found.io", + "aliased_url": "https://my-deployment-name.es.us-east-1.aws.found.io", + "ports": { + "transport_passthrough": 9400, + "http": 9200, + "https": 443 + }, + "cloud_id": "my_deployment_name:someCloudID" + } + }, + "region": "us-east-1", + "id": "0589ddb3acee4641b95833022bf04d2b", + "ref_id": "main-elasticsearch" + } + ], + "apm": [], + "appsearch": [], + "integrations_server": [] + }, + "metadata": { + "last_resource_plan_modified": "2022-10-06T09:47:29.673Z", + "tags": [], + "organization_id": "222222", + "subscription_level": "standard", + "last_modified": "2022-10-06T09:47:29.809Z", + "hidden": false, 
+ "system_owned": false, + "owner_id": "111111" + } +} diff --git a/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2.json b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2.json new file mode 100644 index 000000000..12d6a363b --- /dev/null +++ b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2.json @@ -0,0 +1,363 @@ +{ + "instance_configurations": [], + "description": "Use for for all-purpose workloads, including time-series data like logs and metrics.", + "name": "I/O Optimized", + "template_category_id": "io-optimized", + "kibana_deeplink": [ + { + "semver": ">=7.9.0", + "uri": "/app/home" + }, + { + "semver": "<7.9.0", + "uri": "/app/kibana#/home" + } + ], + "id": "aws-io-optimized-v2", + "deployment_template": { + "resources": { + "integrations_server": [ + { + "elasticsearch_cluster_ref_id": "es-ref-id", + "region": "us-east-1", + "plan": { + "cluster_topology": [ + { + "instance_configuration_id": "aws.integrationsserver.r5d", + "zone_count": 1, + "size": { + "resource": "memory", + "value": 1024 + } + } + ], + "integrations_server": {} + }, + "ref_id": "integrations_server-ref-id" + } + ], + "elasticsearch": [ + { + "region": "us-east-1", + "settings": { + "dedicated_masters_threshold": 6 + }, + "plan": { + "autoscaling_enabled": false, + "cluster_topology": [ + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "node_type": { + "data": false, + "master": false, + "ingest": true + }, + "instance_configuration_id": "aws.coordinating.m5d", + "node_roles": [ + "ingest", + "remote_cluster_client" + ], + "id": "coordinating", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 1024 + } + }, + "node_type": { + "data": true, + "master": true, + "ingest": true + }, + "elasticsearch": { + "node_attributes": { + "data": "hot" + } + }, + "autoscaling_max": { + 
"resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highio.i3", + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "id": "hot_content", + "size": { + "resource": "memory", + "value": 8192 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "node_type": { + "data": true, + "master": false, + "ingest": false + }, + "elasticsearch": { + "node_attributes": { + "data": "warm" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_warm", + "remote_cluster_client" + ], + "id": "warm", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "node_type": { + "data": true, + "master": false, + "ingest": false + }, + "elasticsearch": { + "node_attributes": { + "data": "cold" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 59392 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_cold", + "remote_cluster_client" + ], + "id": "cold", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "node_type": { + "data": true, + "master": false, + "ingest": false + }, + "elasticsearch": { + "node_attributes": { + "data": "frozen" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 122880 + }, + "instance_configuration_id": "aws.es.datafrozen.i3en", + "node_roles": [ + "data_frozen" + ], + "id": "frozen", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 3, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "node_type": { + "data": false, + 
"master": true, + "ingest": false + }, + "instance_configuration_id": "aws.master.r5d", + "node_roles": [ + "master", + "remote_cluster_client" + ], + "id": "master", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "autoscaling_min": { + "resource": "memory", + "value": 0 + }, + "node_type": { + "data": false, + "master": false, + "ingest": false, + "ml": true + }, + "autoscaling_max": { + "resource": "memory", + "value": 61440 + }, + "instance_configuration_id": "aws.ml.m5d", + "node_roles": [ + "ml", + "remote_cluster_client" + ], + "id": "ml", + "size": { + "resource": "memory", + "value": 0 + } + } + ], + "elasticsearch": {} + }, + "ref_id": "es-ref-id" + } + ], + "enterprise_search": [ + { + "elasticsearch_cluster_ref_id": "es-ref-id", + "region": "us-east-1", + "plan": { + "cluster_topology": [ + { + "node_type": { + "connector": true, + "appserver": true, + "worker": true + }, + "instance_configuration_id": "aws.enterprisesearch.m5d", + "zone_count": 2, + "size": { + "resource": "memory", + "value": 0 + } + } + ], + "enterprise_search": {} + }, + "ref_id": "enterprise_search-ref-id" + } + ], + "kibana": [ + { + "elasticsearch_cluster_ref_id": "es-ref-id", + "region": "us-east-1", + "plan": { + "cluster_topology": [ + { + "instance_configuration_id": "aws.kibana.r5d", + "zone_count": 1, + "size": { + "resource": "memory", + "value": 1024 + } + } + ], + "kibana": {} + }, + "ref_id": "kibana-ref-id" + } + ], + "apm": [ + { + "elasticsearch_cluster_ref_id": "es-ref-id", + "region": "us-east-1", + "plan": { + "cluster_topology": [ + { + "instance_configuration_id": "aws.apm.r5d", + "zone_count": 1, + "size": { + "resource": "memory", + "value": 1024 + } + } + ], + "apm": {} + }, + "ref_id": "apm-ref-id" + } + ] + }, + "settings": { + "autoscaling_enabled": false + } + }, + "system_owned": true, + "metadata": [ + { + "value": "true", + "key": 
"hidden" + }, + { + "value": "aws-hot-warm-v2", + "key": "hot_warm_template" + }, + { + "value": "true", + "key": "recommended" + }, + { + "value": "true", + "key": "trial-eligible" + }, + { + "value": "stack", + "key": "parent_solution" + } + ] +} diff --git a/ec/ecresource/deploymentresource/testutil_func.go b/ec/ecresource/deploymentresource/testutil/testutil_func.go similarity index 78% rename from ec/ecresource/deploymentresource/testutil_func.go rename to ec/ecresource/deploymentresource/testutil/testutil_func.go index bcaae5251..879d1002a 100644 --- a/ec/ecresource/deploymentresource/testutil_func.go +++ b/ec/ecresource/deploymentresource/testutil/testutil_func.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. -package deploymentresource +package testutil import ( "encoding/json" @@ -23,14 +23,13 @@ import ( "os" "testing" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" ) // parseDeploymentTemplate is a test helper which parse a file by path and // returns a models.DeploymentTemplateInfoV2. 
-func parseDeploymentTemplate(t *testing.T, name string) *models.DeploymentTemplateInfoV2 { +func ParseDeploymentTemplate(t *testing.T, name string) *models.DeploymentTemplateInfoV2 { t.Helper() f, err := os.Open(name) if err != nil { @@ -53,7 +52,7 @@ func parseDeploymentTemplate(t *testing.T, name string) *models.DeploymentTempla return &res } -func openDeploymentGet(t *testing.T, name string) *models.DeploymentGetResponse { +func OpenDeploymentGet(t *testing.T, name string) *models.DeploymentGetResponse { t.Helper() f, err := os.Open(name) if err != nil { @@ -68,7 +67,7 @@ func openDeploymentGet(t *testing.T, name string) *models.DeploymentGetResponse return &res } -func enrichWithEmptyTopologies(tpl, want *models.ElasticsearchPayload) []*models.ElasticsearchPayload { +func EnrichWithEmptyTopologies(tpl, want *models.ElasticsearchPayload) *models.ElasticsearchPayload { tpl.DisplayName = want.DisplayName tpl.RefID = want.RefID tpl.Region = want.Region @@ -85,10 +84,10 @@ func enrichWithEmptyTopologies(tpl, want *models.ElasticsearchPayload) []*models } } - return []*models.ElasticsearchPayload{tpl} + return tpl } -func readerToESPayload(t *testing.T, rc io.Reader, nr bool) *models.ElasticsearchPayload { +func ReaderToESPayload(t *testing.T, rc io.Reader, nr bool) *models.ElasticsearchPayload { t.Helper() var tpl models.DeploymentTemplateInfoV2 @@ -96,16 +95,10 @@ func readerToESPayload(t *testing.T, rc io.Reader, nr bool) *models.Elasticsearc t.Fatal(err) } - return enrichElasticsearchTemplate( + return utils.EnrichElasticsearchTemplate( tpl.DeploymentTemplate.Resources.Elasticsearch[0], *tpl.ID, "", nr, ) } - -func newDeploymentRD(t *testing.T, id string, raw map[string]interface{}) *schema.ResourceData { - rd := schema.TestResourceDataRaw(t, newSchema(), raw) - rd.SetId(id) - return rd -} diff --git a/ec/ecresource/deploymentresource/testutil_func_test.go b/ec/ecresource/deploymentresource/testutil/testutil_func_test.go similarity index 96% rename from 
ec/ecresource/deploymentresource/testutil_func_test.go rename to ec/ecresource/deploymentresource/testutil/testutil_func_test.go index e9210f5ba..aa50a0411 100644 --- a/ec/ecresource/deploymentresource/testutil_func_test.go +++ b/ec/ecresource/deploymentresource/testutil/testutil_func_test.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. -package deploymentresource +package testutil import ( "os" @@ -60,7 +60,7 @@ func Test_parseDeploymentTemplate(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := parseDeploymentTemplate(t, tt.args.name) + got := ParseDeploymentTemplate(t, tt.args.name) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecresource/deploymentresource/testutil_datastruct.go b/ec/ecresource/deploymentresource/testutil_datastruct.go deleted file mode 100644 index 13c475d27..000000000 --- a/ec/ecresource/deploymentresource/testutil_datastruct.go +++ /dev/null @@ -1,263 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "github.com/elastic/cloud-sdk-go/pkg/api/mock" -) - -func newSampleDeployment() map[string]interface{} { - return map[string]interface{}{ - "alias": "my-deployment", - "name": "my_deployment_name", - "deployment_template_id": "aws-hot-warm-v2", - "region": "us-east-1", - "version": "7.11.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "us-east-1", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - }}, - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "node_roles": []interface{}{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - "zone_count": 1, - }, - map[string]interface{}{ - "id": "warm", - "size": "2g", - "node_roles": []interface{}{ - "data_warm", - "remote_cluster_client", - }, - "zone_count": 1, - }, - }, - }}, - "kibana": []interface{}{newKibanaSample()}, - "apm": []interface{}{newApmSample()}, - "enterprise_search": []interface{}{newEnterpriseSearchSample()}, - "observability": []interface{}{newObservabilitySample()}, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - } -} - -func newSampleLegacyDeployment() map[string]interface{} { - return map[string]interface{}{ - "alias": "my-deployment", - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.7.0", - "elasticsearch": []interface{}{newElasticsearchSample()}, - "kibana": []interface{}{newKibanaSample()}, - "apm": []interface{}{newApmSample()}, - "enterprise_search": []interface{}{newEnterpriseSearchSample()}, - "observability": 
[]interface{}{newObservabilitySample()}, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - } -} - -func newSampleDeploymentEmptyRD() map[string]interface{} { - return map[string]interface{}{ - "alias": "my-deployment", - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.7.0", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - "apm": []interface{}{map[string]interface{}{}}, - "enterprise_search": []interface{}{map[string]interface{}{}}, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - } -} - -func newSampleDeploymentOverrides() map[string]interface{} { - return map[string]interface{}{ - "alias": "my-deployment", - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.7.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "ref_id": "main-elasticsearch", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "4g", - }}}, - }, - "kibana": []interface{}{map[string]interface{}{ - "ref_id": "main-kibana", - "topology": []interface{}{map[string]interface{}{ - "size": "2g", - }}}, - }, - "apm": []interface{}{map[string]interface{}{ - "ref_id": "main-apm", - "topology": []interface{}{map[string]interface{}{ - "size": "1g", - }}}, - }, - "enterprise_search": []interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "topology": []interface{}{map[string]interface{}{ - "size": "4g", - }}}, - }, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - } -} - -func newSampleDeploymentOverridesIC() map[string]interface{} { - return map[string]interface{}{ - "alias": "my-deployment", - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.7.0", - "elasticsearch": []interface{}{map[string]interface{}{ - 
"ref_id": "main-elasticsearch", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - }}}, - }, - "kibana": []interface{}{map[string]interface{}{ - "ref_id": "main-kibana", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - }}}, - }, - "apm": []interface{}{map[string]interface{}{ - "ref_id": "main-apm", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - }}}, - }, - "enterprise_search": []interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.m5d", - }}}, - }, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - } -} - -func newElasticsearchSample() map[string]interface{} { - return map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "us-east-1", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - }}, - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "size": "2g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "zone_count": 1, - }}, - } -} - -func newKibanaSample() map[string]interface{} { - return map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "us-east-1", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "zone_count": 1, - }, - }, - } -} - -func newApmSample() 
map[string]interface{} { - return map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "us-east-1", - // Reproduces the case where the default fields are set. - "config": []interface{}{map[string]interface{}{ - "debug_enabled": false, - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "0.5g", - "zone_count": 1, - }}, - } -} - -func newEnterpriseSearchSample() map[string]interface{} { - return map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "us-east-1", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.m5d", - "size": "2g", - "zone_count": 1, - "node_type_appserver": true, - "node_type_connector": true, - "node_type_worker": true, - }, - }, - } -} - -func newObservabilitySample() map[string]interface{} { - return map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "logs": true, - "metrics": true, - } -} diff --git a/ec/ecresource/deploymentresource/topology/v1/topology.go b/ec/ecresource/deploymentresource/topology/v1/topology.go new file mode 100644 index 000000000..5e97eb169 --- /dev/null +++ b/ec/ecresource/deploymentresource/topology/v1/topology.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type TopologyTF struct { + InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` +} + +type Topology struct { + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` +} + +type Topologies []Topology diff --git a/ec/ecresource/deploymentresource/traffic_filter.go b/ec/ecresource/deploymentresource/traffic_filter.go deleted file mode 100644 index a2c1f0c58..000000000 --- a/ec/ecresource/deploymentresource/traffic_filter.go +++ /dev/null @@ -1,69 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/models" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// flattenTrafficFiltering parses a deployment's traffic filtering settings. -func flattenTrafficFiltering(settings *models.DeploymentSettings) *schema.Set { - if settings == nil || settings.TrafficFilterSettings == nil { - return nil - } - - var rules []interface{} - for _, rule := range settings.TrafficFilterSettings.Rulesets { - rules = append(rules, rule) - } - - if len(rules) > 0 { - return schema.NewSet(schema.HashString, rules) - } - - return nil -} - -// expandTrafficFilterCreate expands the flattened "traffic_filter" settings to -// a DeploymentCreateRequest. -func expandTrafficFilterCreate(set *schema.Set, req *models.DeploymentCreateRequest) { - if set == nil || req == nil { - return - } - - if set.Len() == 0 { - return - } - - if req.Settings == nil { - req.Settings = &models.DeploymentCreateSettings{} - } - - if req.Settings.TrafficFilterSettings == nil { - req.Settings.TrafficFilterSettings = &models.TrafficFilterSettings{} - } - - req.Settings.TrafficFilterSettings.Rulesets = append( - req.Settings.TrafficFilterSettings.Rulesets, - util.ItemsToString(set.List())..., - ) -} diff --git a/ec/ecresource/deploymentresource/update.go b/ec/ecresource/deploymentresource/update.go index 9fae24ab6..c09ae49c9 100644 --- a/ec/ecresource/deploymentresource/update.go +++ b/ec/ecresource/deploymentresource/update.go @@ -19,74 +19,150 @@ package deploymentresource import ( "context" - "strings" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" - 
"github.com/elastic/cloud-sdk-go/pkg/multierror" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" + v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource" ) -// Update syncs the remote state with the local. -func updateResource(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) +func (r *Resource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var plan v2.DeploymentTF - if hasDeploymentChange(d) { - if err := updateDeployment(ctx, d, client); err != nil { - return diag.FromErr(err) - } - } + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if err := handleTrafficFilterChange(d, client); err != nil { - return diag.FromErr(err) + if resp.Diagnostics.HasError() { + return } - if err := handleRemoteClusters(d, client); err != nil { - return diag.FromErr(err) + var state v2.DeploymentTF + + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + + if resp.Diagnostics.HasError() { + return } - return readResource(ctx, d, meta) -} + updateReq, diags := plan.UpdateRequest(ctx, r.client, state) -func updateDeployment(_ context.Context, d *schema.ResourceData, client *api.API) error { - req, err := updateResourceToModel(d, client) - if err != nil { - return err + if diags.HasError() { + resp.Diagnostics.Append(diags...) 
+ return } res, err := deploymentapi.Update(deploymentapi.UpdateParams{ - API: client, - DeploymentID: d.Id(), - Request: req, + API: r.client, + DeploymentID: plan.Id.Value, + Request: updateReq, Overrides: deploymentapi.PayloadOverrides{ - Version: d.Get("version").(string), - Region: d.Get("region").(string), + Version: plan.Version.Value, + Region: plan.Region.Value, }, }) if err != nil { - return multierror.NewPrefixed("failed updating deployment", err) + resp.Diagnostics.AddError("failed updating deployment", err.Error()) + return + } + + if err := WaitForPlanCompletion(r.client, plan.Id.Value); err != nil { + resp.Diagnostics.AddError("failed tracking update progress", err.Error()) + return } - if err := WaitForPlanCompletion(client, d.Id()); err != nil { - return multierror.NewPrefixed("failed tracking update progress", err) + resp.Diagnostics.Append(handleTrafficFilterChange(ctx, r.client, plan, state)...) + + resp.Diagnostics.Append(v2.HandleRemoteClusters(ctx, r.client, plan.Id.Value, plan.Elasticsearch)...) + + deployment, diags := r.read(ctx, plan.Id.Value, &state, plan, res.Resources) + + resp.Diagnostics.Append(diags...) + + if deployment == nil { + resp.Diagnostics.AddError("cannot read just updated resource", "") + resp.State.RemoveResource(ctx) + return } - return parseCredentials(d, res.Resources) + resp.Diagnostics.Append(resp.State.Set(ctx, deployment)...) } -// hasDeploymentChange checks if there's any change in the resource attributes -// except in the "traffic_filter" prefixed keys. If so, it returns true. 
-func hasDeploymentChange(d *schema.ResourceData) bool { - for attr := range d.State().Attributes { - if strings.HasPrefix(attr, "traffic_filter") { - continue +func handleTrafficFilterChange(ctx context.Context, client *api.API, plan, state v2.DeploymentTF) diag.Diagnostics { + if plan.TrafficFilter.IsNull() || plan.TrafficFilter.Equal(state.TrafficFilter) { + return nil + } + + var planRules, stateRules ruleSet + if diags := plan.TrafficFilter.ElementsAs(ctx, &planRules, true); diags.HasError() { + return diags + } + + if diags := state.TrafficFilter.ElementsAs(ctx, &stateRules, true); diags.HasError() { + return diags + } + + var rulesToAdd, rulesToDelete []string + + for _, rule := range planRules { + if !stateRules.exist(rule) { + rulesToAdd = append(rulesToAdd, rule) + } + } + + for _, rule := range stateRules { + if !planRules.exist(rule) { + rulesToDelete = append(rulesToDelete, rule) } - // Check if any of the resource attributes has a change. - if d.HasChange(attr) { + } + + var diags diag.Diagnostics + for _, rule := range rulesToAdd { + if err := associateRule(rule, plan.Id.Value, client); err != nil { + diags.AddError("cannot associate traffic filter rule", err.Error()) + } + } + + for _, rule := range rulesToDelete { + if err := removeRule(rule, plan.Id.Value, client); err != nil { + diags.AddError("cannot remove traffic filter rule", err.Error()) + } + } + + return diags +} + +type ruleSet []string + +func (rs ruleSet) exist(rule string) bool { + for _, r := range rs { + if r == rule { return true } } return false } + +func associateRule(ruleID, deploymentID string, client *api.API) error { + res, err := trafficfilterapi.Get(trafficfilterapi.GetParams{ + API: client, ID: ruleID, IncludeAssociations: true, + }) + if err != nil { + return err + } + + // When the rule has already been associated, return. + for _, assoc := range res.Associations { + if deploymentID == *assoc.ID { + return nil + } + } + + // Create assignment. 
+ if err := trafficfilterapi.CreateAssociation(trafficfilterapi.CreateAssociationParams{ + API: client, ID: ruleID, EntityType: "deployment", EntityID: deploymentID, + }); err != nil { + return err + } + return nil +} diff --git a/ec/ecresource/deploymentresource/update_test.go b/ec/ecresource/deploymentresource/update_test.go deleted file mode 100644 index 92d205cdf..000000000 --- a/ec/ecresource/deploymentresource/update_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_hasDeploymentChange(t *testing.T) { - unchanged := Resource().Data(util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: newSampleLegacyDeployment(), - }).State()) - - changesToTrafficFilter := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "traffic_filter": []interface{}{"1.1.1.1"}, - }, - }) - - changesToName := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{"name": "some name"}, - }) - - changesToRegion := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "some name", - "region": "some-region", - }, - }) - - type args struct { - d *schema.ResourceData - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "when a new resource is persisted and has no changes.", - args: args{d: unchanged}, - want: false, - }, - { - name: "when a new resource has some changes in traffic_filter", - args: args{d: changesToTrafficFilter}, - want: false, - }, - { - name: "when a new resource is has some changes in name", - args: args{d: changesToName}, - want: true, - }, - { - name: "when a new resource is has some changes in name", - args: args{d: changesToRegion}, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := hasDeploymentChange(tt.args.d) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/ecresource/deploymentresource/update_traffic_rules.go b/ec/ecresource/deploymentresource/update_traffic_rules.go deleted file mode 
100644 index f39d47c83..000000000 --- a/ec/ecresource/deploymentresource/update_traffic_rules.go +++ /dev/null @@ -1,115 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func handleTrafficFilterChange(d *schema.ResourceData, client *api.API) error { - if !d.HasChange("traffic_filter") { - return nil - } - - var additions, deletions = getChange(d.GetChange("traffic_filter")) - for _, ruleID := range additions.List() { - if err := associateRule(ruleID.(string), d.Id(), client); err != nil { - return err - } - } - - for _, ruleID := range deletions.List() { - if err := removeRule(ruleID.(string), d.Id(), client); err != nil { - return err - } - } - - return nil -} - -func getChange(oldInterface, newInterface interface{}) (add, delete *schema.Set) { - var old, new *schema.Set - if s, ok := oldInterface.(*schema.Set); ok { - old = s - } - if s, ok := newInterface.(*schema.Set); ok { - new = s - } - - add = new.Difference(old) - 
delete = old.Difference(new) - - return add, delete -} - -func associateRule(ruleID, deploymentID string, client *api.API) error { - res, err := trafficfilterapi.Get(trafficfilterapi.GetParams{ - API: client, ID: ruleID, IncludeAssociations: true, - }) - if err != nil { - return err - } - - // When the rule has already been associated, return. - for _, assoc := range res.Associations { - if deploymentID == *assoc.ID { - return nil - } - } - - // Create assignment. - if err := trafficfilterapi.CreateAssociation(trafficfilterapi.CreateAssociationParams{ - API: client, ID: ruleID, EntityType: "deployment", EntityID: deploymentID, - }); err != nil { - return err - } - return nil -} - -func removeRule(ruleID, deploymentID string, client *api.API) error { - res, err := trafficfilterapi.Get(trafficfilterapi.GetParams{ - API: client, ID: ruleID, IncludeAssociations: true, - }) - - // If the rule is gone (403 or 404), return nil. - if err != nil { - if util.TrafficFilterNotFound(err) { - return nil - } - return err - } - - // If the rule is found, then delete the association. - for _, assoc := range res.Associations { - if deploymentID == *assoc.ID { - return trafficfilterapi.DeleteAssociation(trafficfilterapi.DeleteAssociationParams{ - API: client, - ID: ruleID, - EntityID: *assoc.ID, - EntityType: *assoc.EntityType, - }) - } - } - - return nil -} diff --git a/ec/ecresource/deploymentresource/update_traffic_rules_test.go b/ec/ecresource/deploymentresource/update_traffic_rules_test.go deleted file mode 100644 index 89a235e5b..000000000 --- a/ec/ecresource/deploymentresource/update_traffic_rules_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" -) - -func Test_getChange(t *testing.T) { - type args struct { - oldInterface interface{} - newInterface interface{} - } - tests := []struct { - name string - args args - wantAdditions []interface{} - wantDeletions []interface{} - }{ - { - name: "diffs totally different slices", - args: args{ - oldInterface: schema.NewSet(schema.HashString, []interface{}{ - "rule 1", "rule 2", - }), - newInterface: schema.NewSet(schema.HashString, []interface{}{ - "rule 3", "rule 4", - }), - }, - wantAdditions: []interface{}{"rule 4", "rule 3"}, - wantDeletions: []interface{}{"rule 1", "rule 2"}, - }, - { - name: "diffs equal slices", - args: args{ - oldInterface: schema.NewSet(schema.HashString, []interface{}{ - "rule 1", "rule 2", - }), - newInterface: schema.NewSet(schema.HashString, []interface{}{ - "rule 1", "rule 2", - }), - }, - wantAdditions: make([]interface{}, 0), - wantDeletions: make([]interface{}, 0), - }, - { - name: "diffs equal slightly slices", - args: args{ - oldInterface: schema.NewSet(schema.HashString, []interface{}{ - "rule 1", "rule 2", - }), - newInterface: schema.NewSet(schema.HashString, []interface{}{ - "rule 1", "rule 2", "rule 3", - }), - }, - wantAdditions: []interface{}{"rule 3"}, - wantDeletions: make([]interface{}, 0), - 
}, - { - name: "diffs a removal", - args: args{ - newInterface: schema.NewSet(schema.HashString, nil), - oldInterface: schema.NewSet(schema.HashString, []interface{}{ - "rule 1", "rule 2", - }), - }, - wantDeletions: []interface{}{"rule 1", "rule 2"}, - wantAdditions: make([]interface{}, 0), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotAdditions, gotDeletions := getChange(tt.args.oldInterface, tt.args.newInterface) - assert.Equal(t, tt.wantAdditions, gotAdditions.List(), "Additions") - assert.Equal(t, tt.wantDeletions, gotDeletions.List(), "Deletions") - }) - } -} diff --git a/ec/ecresource/deploymentresource/utils/definitions.go b/ec/ecresource/deploymentresource/utils/definitions.go new file mode 100644 index 000000000..706c8bdd2 --- /dev/null +++ b/ec/ecresource/deploymentresource/utils/definitions.go @@ -0,0 +1,23 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package utils + +const ( + // minimumEnterpriseSearchSize = 2048 + MinimumZoneCount = 1 +) diff --git a/ec/ecresource/deploymentresource/utils/enrich_elasticsearch_template.go b/ec/ecresource/deploymentresource/utils/enrich_elasticsearch_template.go new file mode 100644 index 000000000..571282325 --- /dev/null +++ b/ec/ecresource/deploymentresource/utils/enrich_elasticsearch_template.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package utils + +import ( + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" +) + +func EnrichElasticsearchTemplate(tpl *models.ElasticsearchPayload, templateId, version string, useNodeRoles bool) *models.ElasticsearchPayload { + if tpl.Plan.DeploymentTemplate == nil { + tpl.Plan.DeploymentTemplate = &models.DeploymentTemplateReference{} + } + + if tpl.Plan.DeploymentTemplate.ID == nil || *tpl.Plan.DeploymentTemplate.ID == "" { + tpl.Plan.DeploymentTemplate.ID = ec.String(templateId) + } + + if tpl.Plan.Elasticsearch.Version == "" { + tpl.Plan.Elasticsearch.Version = version + } + + for _, topology := range tpl.Plan.ClusterTopology { + if useNodeRoles { + topology.NodeType = nil + continue + } + topology.NodeRoles = nil + } + + return tpl +} + +func EsResource(res *models.DeploymentTemplateInfoV2) *models.ElasticsearchPayload { + if res == nil || len(res.DeploymentTemplate.Resources.Elasticsearch) == 0 { + return &models.ElasticsearchPayload{ + Plan: &models.ElasticsearchClusterPlan{ + Elasticsearch: &models.ElasticsearchConfiguration{}, + }, + Settings: &models.ElasticsearchClusterSettings{}, + } + } + return res.DeploymentTemplate.Resources.Elasticsearch[0] +} diff --git a/ec/internal/flatteners/flatten_tags.go b/ec/ecresource/deploymentresource/utils/get_first.go similarity index 63% rename from ec/internal/flatteners/flatten_tags.go rename to ec/ecresource/deploymentresource/utils/get_first.go index bc455ad8c..eb882c19e 100644 --- a/ec/internal/flatteners/flatten_tags.go +++ b/ec/ecresource/deploymentresource/utils/get_first.go @@ -15,23 +15,30 @@ // specific language governing permissions and limitations // under the License. 
-package flatteners +package utils import ( - "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/types" + "context" - "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -// flattenTags takes in Deployment Metadata resource models and returns its -// Tags in flattened form. -func FlattenTags(metadataItems []*models.MetadataItem) types.Map { - var tags = make(map[string]attr.Value) - for _, res := range metadataItems { - if res.Key != nil { - tags[*res.Key] = types.String{Value: *res.Value} - } +func GetFirst(ctx context.Context, list types.List, target any) diag.Diagnostics { + if list.IsNull() || list.IsUnknown() || len(list.Elems) == 0 { + return nil + } + + if list.Elems[0].IsUnknown() || list.Elems[0].IsNull() { + return nil } - return types.Map{ElemType: types.StringType, Elems: tags} + + diags := tfsdk.ValueAs(ctx, list.Elems[0], target) + + if diags.HasError() { + return diags + } + + return nil } diff --git a/ec/ecresource/deploymentresource/utils/getters.go b/ec/ecresource/deploymentresource/utils/getters.go new file mode 100644 index 000000000..2441d8875 --- /dev/null +++ b/ec/ecresource/deploymentresource/utils/getters.go @@ -0,0 +1,175 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package utils + +import ( + "errors" + "fmt" + "strings" + + "github.com/blang/semver" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/util" +) + +func HasRunningResources(res *models.DeploymentGetResponse) bool { + var hasRunning bool + if res.Resources != nil { + for _, r := range res.Resources.Elasticsearch { + if !IsEsResourceStopped(r) { + hasRunning = true + } + } + for _, r := range res.Resources.Kibana { + if !IsKibanaResourceStopped(r) { + hasRunning = true + } + } + for _, r := range res.Resources.Apm { + if !IsApmResourceStopped(r) { + hasRunning = true + } + } + for _, r := range res.Resources.EnterpriseSearch { + if !IsEssResourceStopped(r) { + hasRunning = true + } + } + for _, r := range res.Resources.IntegrationsServer { + if !IsIntegrationsServerResourceStopped(r) { + hasRunning = true + } + } + } + return hasRunning +} + +func GetDeploymentTemplateID(res *models.DeploymentResources) (string, error) { + var deploymentTemplateID string + var foundTemplates []string + for _, esRes := range res.Elasticsearch { + if util.IsCurrentEsPlanEmpty(esRes) { + continue + } + + var emptyDT = esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate == nil + if emptyDT { + continue + } + + if deploymentTemplateID == "" { + deploymentTemplateID = *esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID + } + + foundTemplates = append(foundTemplates, + *esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID, + ) + } + + if deploymentTemplateID == "" { + return "", errors.New("failed to obtain 
the deployment template id") + } + + if len(foundTemplates) > 1 { + return "", fmt.Errorf( + "there are more than 1 deployment templates specified on the deployment: \"%s\"", strings.Join(foundTemplates, ", "), + ) + } + + return deploymentTemplateID, nil +} + +func GetRegion(res *models.DeploymentResources) (region string) { + for _, r := range res.Elasticsearch { + if r.Region != nil && *r.Region != "" { + return *r.Region + } + } + + return region +} + +func GetLowestVersion(res *models.DeploymentResources) (string, error) { + // We're starting off with a very high version so it can be replaced. + replaceVersion := `99.99.99` + version := semver.MustParse(replaceVersion) + for _, r := range res.Elasticsearch { + if !util.IsCurrentEsPlanEmpty(r) { + v := r.Info.PlanInfo.Current.Plan.Elasticsearch.Version + if err := swapLowerVersion(&version, v); err != nil && !IsEsResourceStopped(r) { + return "", fmt.Errorf("elasticsearch version '%s' is not semver compliant: %w", v, err) + } + } + } + + for _, r := range res.Kibana { + if !util.IsCurrentKibanaPlanEmpty(r) { + v := r.Info.PlanInfo.Current.Plan.Kibana.Version + if err := swapLowerVersion(&version, v); err != nil && !IsKibanaResourceStopped(r) { + return version.String(), fmt.Errorf("kibana version '%s' is not semver compliant: %w", v, err) + } + } + } + + for _, r := range res.Apm { + if !util.IsCurrentApmPlanEmpty(r) { + v := r.Info.PlanInfo.Current.Plan.Apm.Version + if err := swapLowerVersion(&version, v); err != nil && !IsApmResourceStopped(r) { + return version.String(), fmt.Errorf("apm version '%s' is not semver compliant: %w", v, err) + } + } + } + + for _, r := range res.IntegrationsServer { + if !util.IsCurrentIntegrationsServerPlanEmpty(r) { + v := r.Info.PlanInfo.Current.Plan.IntegrationsServer.Version + if err := swapLowerVersion(&version, v); err != nil && !IsIntegrationsServerResourceStopped(r) { + return version.String(), fmt.Errorf("integrations_server version '%s' is not semver compliant: %w", 
v, err) + } + } + } + + for _, r := range res.EnterpriseSearch { + if !util.IsCurrentEssPlanEmpty(r) { + v := r.Info.PlanInfo.Current.Plan.EnterpriseSearch.Version + if err := swapLowerVersion(&version, v); err != nil && !IsEssResourceStopped(r) { + return version.String(), fmt.Errorf("enterprise search version '%s' is not semver compliant: %w", v, err) + } + } + } + + if version.String() != replaceVersion { + return version.String(), nil + } + return "", errors.New("unable to determine the lowest version for any the deployment components") +} + +func swapLowerVersion(version *semver.Version, comp string) error { + if comp == "" { + return nil + } + + v, err := semver.Parse(comp) + if err != nil { + return err + } + if v.LT(*version) { + *version = v + } + return nil +} diff --git a/ec/ecresource/deploymentresource/utils/getters_test.go b/ec/ecresource/deploymentresource/utils/getters_test.go new file mode 100644 index 000000000..9f9afb039 --- /dev/null +++ b/ec/ecresource/deploymentresource/utils/getters_test.go @@ -0,0 +1,206 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package utils + +import ( + "errors" + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/stretchr/testify/assert" +) + +func Test_getDeploymentTemplateID(t *testing.T) { + type args struct { + res *models.DeploymentResources + } + tests := []struct { + name string + args args + want string + err error + }{ + { + name: "empty resources returns an error", + args: args{res: &models.DeploymentResources{}}, + err: errors.New("failed to obtain the deployment template id"), + }, + { + name: "single empty current plan returns error", + args: args{res: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + { + Info: &models.ElasticsearchClusterInfo{ + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Pending: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized"), + }, + }, + }, + }, + }, + }, + }, + }}, + err: errors.New("failed to obtain the deployment template id"), + }, + { + name: "multiple deployment templates returns an error", + args: args{res: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + { + Info: &models.ElasticsearchClusterInfo{ + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("someid"), + }, + }, + }, + }, + }, + }, + { + Info: &models.ElasticsearchClusterInfo{ + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("someotherid"), + }, + }, + }, + }, + }, + }, + }, + }}, + err: errors.New("there are more than 1 deployment templates specified on the deployment: \"someid, 
someotherid\""), + }, + { + name: "single deployment template returns it", + args: args{res: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + { + Info: &models.ElasticsearchClusterInfo{ + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized"), + }, + }, + }, + }, + }, + }, + }, + }}, + want: "aws-io-optimized", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := GetDeploymentTemplateID(tt.args.res) + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + } + + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_hasRunningResources(t *testing.T) { + type args struct { + res *models.DeploymentGetResponse + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "has all the resources stopped", + args: args{res: &models.DeploymentGetResponse{Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + {Info: &models.ElasticsearchClusterInfo{Status: ec.String("stopped")}}, + }, + Kibana: []*models.KibanaResourceInfo{ + {Info: &models.KibanaClusterInfo{Status: ec.String("stopped")}}, + }, + Apm: []*models.ApmResourceInfo{ + {Info: &models.ApmInfo{Status: ec.String("stopped")}}, + }, + EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ + {Info: &models.EnterpriseSearchInfo{Status: ec.String("stopped")}}, + }, + }}}, + want: false, + }, + { + name: "has some resources stopped", + args: args{res: &models.DeploymentGetResponse{Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + {Info: &models.ElasticsearchClusterInfo{Status: ec.String("running")}}, + }, + Kibana: []*models.KibanaResourceInfo{ + {Info: &models.KibanaClusterInfo{Status: ec.String("stopped")}}, + }, + Apm: 
[]*models.ApmResourceInfo{ + {Info: &models.ApmInfo{Status: ec.String("running")}}, + }, + EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ + {Info: &models.EnterpriseSearchInfo{Status: ec.String("running")}}, + }, + }}}, + want: true, + }, + { + name: "has all resources running", + args: args{res: &models.DeploymentGetResponse{Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + {Info: &models.ElasticsearchClusterInfo{Status: ec.String("running")}}, + }, + Kibana: []*models.KibanaResourceInfo{ + {Info: &models.KibanaClusterInfo{Status: ec.String("running")}}, + }, + Apm: []*models.ApmResourceInfo{ + {Info: &models.ApmInfo{Status: ec.String("running")}}, + }, + EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ + {Info: &models.EnterpriseSearchInfo{Status: ec.String("running")}}, + }, + }}}, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := HasRunningResources(tt.args.res); got != tt.want { + t.Errorf("hasRunningResources() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/ec/ecresource/deploymentresource/utils/missing_field_error.go b/ec/ecresource/deploymentresource/utils/missing_field_error.go new file mode 100644 index 000000000..0c2d5338e --- /dev/null +++ b/ec/ecresource/deploymentresource/utils/missing_field_error.go @@ -0,0 +1,24 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package utils + +import "fmt" + +func MissingField(field string) error { + return fmt.Errorf("server response doesn't contain deployment '%s'", field) +} diff --git a/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go b/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go new file mode 100644 index 000000000..ec1a70ff9 --- /dev/null +++ b/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package utils + +import ( + "fmt" + + "github.com/blang/semver" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +var ( + DataTiersVersion = semver.MustParse("7.10.0") +) + +func UseNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostics) { + + useNodeRoles, err := CompatibleWithNodeRoles(planVersion.Value) + + if err != nil { + var diags diag.Diagnostics + diags.AddError("Failed to determine whether to use node_roles", err.Error()) + return false, diags + } + + convertLegacy, diags := LegacyToNodeRoles(stateVersion, planVersion) + + if diags.HasError() { + return false, diags + } + + return useNodeRoles && convertLegacy, nil +} + +func CompatibleWithNodeRoles(version string) (bool, error) { + deploymentVersion, err := semver.Parse(version) + if err != nil { + return false, fmt.Errorf("failed to parse Elasticsearch version: %w", err) + } + + return deploymentVersion.GE(DataTiersVersion), nil +} + +// LegacyToNodeRoles returns true when the legacy "node_type_*" should be +// migrated over to node_roles. Which will be true when: +// * The version field doesn't change. +// * The version field changes but: +// - The Elasticsearch.0.toplogy doesn't have any node_type_* set. +func LegacyToNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostics) { + if stateVersion.Value == "" || stateVersion.Value == planVersion.Value { + return true, nil + } + + // If the previous version is empty, node_roles should be used. 
+ if stateVersion.Value == "" { + return true, nil + } + + var diags diag.Diagnostics + oldV, err := semver.Parse(stateVersion.Value) + if err != nil { + diags.AddError("failed to parse previous Elasticsearch version", err.Error()) + return false, diags + } + newV, err := semver.Parse(planVersion.Value) + if err != nil { + diags.AddError("failed to parse new Elasticsearch version", err.Error()) + return false, diags + } + + // if the version change moves from non-node_roles to one + // that supports node roles, do not migrate on that step. + if oldV.LT(DataTiersVersion) && newV.GE(DataTiersVersion) { + return false, nil + } + + return true, nil +} diff --git a/ec/ecresource/deploymentresource/stopped_resource.go b/ec/ecresource/deploymentresource/utils/stopped_resource.go similarity index 68% rename from ec/ecresource/deploymentresource/stopped_resource.go rename to ec/ecresource/deploymentresource/utils/stopped_resource.go index 9d09bf52f..beedaf9b7 100644 --- a/ec/ecresource/deploymentresource/stopped_resource.go +++ b/ec/ecresource/deploymentresource/utils/stopped_resource.go @@ -15,36 +15,36 @@ // specific language governing permissions and limitations // under the License. -package deploymentresource +package utils import "github.com/elastic/cloud-sdk-go/pkg/models" -// isApmResourceStopped returns true if the resource is stopped. -func isApmResourceStopped(res *models.ApmResourceInfo) bool { +// IsApmResourceStopped returns true if the resource is stopped. +func IsApmResourceStopped(res *models.ApmResourceInfo) bool { return res == nil || res.Info == nil || res.Info.Status == nil || *res.Info.Status == "stopped" } -// isIntegrationsServerResourceStopped returns true if the resource is stopped. -func isIntegrationsServerResourceStopped(res *models.IntegrationsServerResourceInfo) bool { +// IsIntegrationsServerResourceStopped returns true if the resource is stopped. 
+func IsIntegrationsServerResourceStopped(res *models.IntegrationsServerResourceInfo) bool { return res == nil || res.Info == nil || res.Info.Status == nil || *res.Info.Status == "stopped" } -// isEsResourceStopped returns true if the resource is stopped. -func isEsResourceStopped(res *models.ElasticsearchResourceInfo) bool { +// IsEsResourceStopped returns true if the resource is stopped. +func IsEsResourceStopped(res *models.ElasticsearchResourceInfo) bool { return res == nil || res.Info == nil || res.Info.Status == nil || *res.Info.Status == "stopped" } -// isEssResourceStopped returns true if the resource is stopped. -func isEssResourceStopped(res *models.EnterpriseSearchResourceInfo) bool { +// IsEssResourceStopped returns true if the resource is stopped. +func IsEssResourceStopped(res *models.EnterpriseSearchResourceInfo) bool { return res == nil || res.Info == nil || res.Info.Status == nil || *res.Info.Status == "stopped" } -// isKibanaResourceStopped returns true if the resource is stopped. -func isKibanaResourceStopped(res *models.KibanaResourceInfo) bool { +// IsKibanaResourceStopped returns true if the resource is stopped. +func IsKibanaResourceStopped(res *models.KibanaResourceInfo) bool { return res == nil || res.Info == nil || res.Info.Status == nil || *res.Info.Status == "stopped" } diff --git a/ec/ecresource/deploymentresource/stopped_resource_test.go b/ec/ecresource/deploymentresource/utils/stopped_resource_test.go similarity index 89% rename from ec/ecresource/deploymentresource/stopped_resource_test.go rename to ec/ecresource/deploymentresource/utils/stopped_resource_test.go index bd2bb0ae3..da0017519 100644 --- a/ec/ecresource/deploymentresource/stopped_resource_test.go +++ b/ec/ecresource/deploymentresource/utils/stopped_resource_test.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package utils import ( "testing" @@ -26,7 +26,7 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) -func Test_isApmResourceStopped(t *testing.T) { +func Test_IsApmResourceStopped(t *testing.T) { type args struct { res *models.ApmResourceInfo } @@ -52,13 +52,13 @@ func Test_isApmResourceStopped(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := isApmResourceStopped(tt.args.res) + got := IsApmResourceStopped(tt.args.res) assert.Equal(t, tt.want, got) }) } } -func Test_isEsResourceStopped(t *testing.T) { +func Test_IsEsResourceStopped(t *testing.T) { type args struct { res *models.ElasticsearchResourceInfo } @@ -84,13 +84,13 @@ func Test_isEsResourceStopped(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := isEsResourceStopped(tt.args.res) + got := IsEsResourceStopped(tt.args.res) assert.Equal(t, tt.want, got) }) } } -func Test_isEssResourceStopped(t *testing.T) { +func Test_IsEssResourceStopped(t *testing.T) { type args struct { res *models.EnterpriseSearchResourceInfo } @@ -116,13 +116,13 @@ func Test_isEssResourceStopped(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := isEssResourceStopped(tt.args.res) + got := IsEssResourceStopped(tt.args.res) assert.Equal(t, tt.want, got) }) } } -func Test_isKibanaResourceStopped(t *testing.T) { +func Test_IsKibanaResourceStopped(t *testing.T) { type args struct { res *models.KibanaResourceInfo } @@ -148,7 +148,7 @@ func Test_isKibanaResourceStopped(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := isKibanaResourceStopped(tt.args.res) + got := IsKibanaResourceStopped(tt.args.res) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecresource/extensionresource/create.go b/ec/ecresource/extensionresource/create.go index 980d16132..2de012b94 100644 --- a/ec/ecresource/extensionresource/create.go +++ 
b/ec/ecresource/extensionresource/create.go @@ -1,3 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + package extensionresource import ( diff --git a/ec/ecresource/extensionresource/delete.go b/ec/ecresource/extensionresource/delete.go index a34620769..8b02db55d 100644 --- a/ec/ecresource/extensionresource/delete.go +++ b/ec/ecresource/extensionresource/delete.go @@ -1,3 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ package extensionresource import ( diff --git a/ec/ecresource/extensionresource/read.go b/ec/ecresource/extensionresource/read.go index b7372c725..8663e8a6d 100644 --- a/ec/ecresource/extensionresource/read.go +++ b/ec/ecresource/extensionresource/read.go @@ -1,3 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + package extensionresource import ( diff --git a/ec/ecresource/extensionresource/update.go b/ec/ecresource/extensionresource/update.go index 1cb3b37e9..d95799d23 100644 --- a/ec/ecresource/extensionresource/update.go +++ b/ec/ecresource/extensionresource/update.go @@ -1,3 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + package extensionresource import ( diff --git a/ec/internal/converters/convert_tags.go b/ec/internal/converters/convert_tags.go new file mode 100644 index 000000000..db5f787d9 --- /dev/null +++ b/ec/internal/converters/convert_tags.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package converters + +import ( + "context" + "sort" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" +) + +// flattenTags takes in Deployment Metadata resource models and returns its +// Tags in flattened form. 
+func TagsToTypeMap(metadataItems []*models.MetadataItem) types.Map { + var tags = make(map[string]attr.Value) + for _, res := range metadataItems { + if res.Key != nil { + tags[*res.Key] = types.String{Value: *res.Value} + } + } + return types.Map{ElemType: types.StringType, Elems: tags} +} + +// flattenTags takes in Deployment Metadata resource models and returns its +// Tags as Go map +func TagsToMap(metadataItems []*models.MetadataItem) map[string]string { + if len(metadataItems) == 0 { + return nil + } + res := make(map[string]string) + for _, item := range metadataItems { + if item.Key != nil { + res[*item.Key] = *item.Value + } + } + return res +} + +func MapToTags(raw map[string]string) []*models.MetadataItem { + result := make([]*models.MetadataItem, 0, len(raw)) + for k, v := range raw { + result = append(result, &models.MetadataItem{ + Key: ec.String(k), + Value: ec.String(v), + }) + } + + // Sort by key + sort.SliceStable(result, func(i, j int) bool { + return *result[i].Key < *result[j].Key + }) + + return result +} + +func TFmapToTags(ctx context.Context, raw types.Map) ([]*models.MetadataItem, diag.Diagnostics) { + result := make([]*models.MetadataItem, 0, len(raw.Elems)) + for k, v := range raw.Elems { + var tag string + if diags := tfsdk.ValueAs(ctx, v, &tag); diags.HasError() { + return nil, diags + } + result = append(result, &models.MetadataItem{ + Key: ec.String(k), + Value: ec.String(tag), + }) + } + + // Sort by key + sort.SliceStable(result, func(i, j int) bool { + return *result[i].Key < *result[j].Key + }) + + return result, nil +} diff --git a/ec/internal/flatteners/flatten_tags_test.go b/ec/internal/converters/convert_tags_test.go similarity index 96% rename from ec/internal/flatteners/flatten_tags_test.go rename to ec/internal/converters/convert_tags_test.go index 6775d6b04..535f03433 100644 --- a/ec/internal/flatteners/flatten_tags_test.go +++ b/ec/internal/converters/convert_tags_test.go @@ -15,7 +15,7 @@ // specific language 
governing permissions and limitations // under the License. -package flatteners +package converters import ( "context" @@ -72,7 +72,7 @@ func TestFlattenTags(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := FlattenTags(tt.args.metadata.Tags) + result := TagsToTypeMap(tt.args.metadata.Tags) got := make(map[string]string, len(result.Elems)) result.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) diff --git a/ec/internal/converters/extract_endpoint.go b/ec/internal/converters/extract_endpoint.go new file mode 100644 index 000000000..2ae395fba --- /dev/null +++ b/ec/internal/converters/extract_endpoint.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package converters + +import ( + "fmt" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// FlattenClusterEndpoint receives a ClusterMetadataInfo, parses the http and +// https endpoints and returns a map with two keys: `http_endpoint` and +// `https_endpoint` +func ExtractEndpointsTF(metadata *models.ClusterMetadataInfo) (httpEndpoint, httpsEndpoint types.String) { + if metadata == nil || metadata.Endpoint == "" || metadata.Ports == nil { + return + } + + if metadata.Ports.HTTP != nil { + httpEndpoint = types.String{Value: fmt.Sprintf("http://%s:%d", metadata.Endpoint, *metadata.Ports.HTTP)} + } + + if metadata.Ports.HTTPS != nil { + httpsEndpoint = types.String{Value: fmt.Sprintf("https://%s:%d", metadata.Endpoint, *metadata.Ports.HTTPS)} + } + + return +} + +// FlattenClusterEndpoint receives a ClusterMetadataInfo, parses the http and +// https endpoints and returns a map with two keys: `http_endpoint` and +// `https_endpoint` +func ExtractEndpoints(metadata *models.ClusterMetadataInfo) (httpEndpoint, httpsEndpoint *string) { + if metadata == nil || metadata.Endpoint == "" || metadata.Ports == nil { + return + } + + if metadata.Ports.HTTP != nil { + httpEndpoint = ec.String(fmt.Sprintf("http://%s:%d", metadata.Endpoint, *metadata.Ports.HTTP)) + } + + if metadata.Ports.HTTPS != nil { + httpsEndpoint = ec.String(fmt.Sprintf("https://%s:%d", metadata.Endpoint, *metadata.Ports.HTTPS)) + } + + return +} diff --git a/ec/internal/converters/parse_topology_size.go b/ec/internal/converters/parse_topology_size.go new file mode 100644 index 000000000..3927ecf71 --- /dev/null +++ b/ec/internal/converters/parse_topology_size.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package converters + +import ( + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deploymentsize" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func ParseTopologySizeTF(size, sizeResource types.String) (*models.TopologySize, error) { + return ParseTopologySize(&size.Value, &sizeResource.Value) +} + +func ParseTopologySize(size, sizeResource *string) (*models.TopologySize, error) { + if size == nil || *size == "" { + return nil, nil + } + + val, err := deploymentsize.ParseGb(*size) + if err != nil { + return nil, err + } + + resource := "memory" + + if sizeResource != nil && *sizeResource != "" { + resource = *sizeResource + } + + return &models.TopologySize{ + Value: ec.Int32(val), + Resource: ec.String(resource), + }, nil +} diff --git a/ec/internal/util/parsers.go b/ec/internal/util/parsers.go index 988115e0b..4c512c335 100644 --- a/ec/internal/util/parsers.go +++ b/ec/internal/util/parsers.go @@ -37,23 +37,21 @@ func MemoryToState(mem int32) string { // ParseTopologySize parses a flattened topology into its model. 
func ParseTopologySize(topology map[string]interface{}) (*models.TopologySize, error) { - if mem, ok := topology["size"]; ok { - if m := mem.(string); m != "" { - val, err := deploymentsize.ParseGb(m) - if err != nil { - return nil, err - } - - var sizeResource = defaultSizeResource - if sr, ok := topology["size_resource"]; ok { - sizeResource = sr.(string) - } - - return &models.TopologySize{ - Value: ec.Int32(val), - Resource: ec.String(sizeResource), - }, nil + if mem, ok := topology["size"].(string); ok && mem != "" { + val, err := deploymentsize.ParseGb(mem) + if err != nil { + return nil, err } + + var sizeResource = defaultSizeResource + if sr, ok := topology["size_resource"].(string); ok { + sizeResource = sr + } + + return &models.TopologySize{ + Value: ec.Int32(val), + Resource: ec.String(sizeResource), + }, nil } return nil, nil diff --git a/ec/internal/validators/length.go b/ec/internal/validators/length.go new file mode 100644 index 000000000..4e9bf1659 --- /dev/null +++ b/ec/internal/validators/length.go @@ -0,0 +1,69 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package validators + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +type lengthValidator struct { + min int + max int +} + +// Description returns a plain text description of the validator's behavior, suitable for a practitioner to understand its impact. +func (v lengthValidator) Description(ctx context.Context) string { + return "Value must not be empty" +} + +// MarkdownDescription returns a markdown formatted description of the validator's behavior, suitable for a practitioner to understand its impact. +func (v lengthValidator) MarkdownDescription(ctx context.Context) string { + return v.Description(ctx) +} + +// Validate runs the main validation logic of the validator, reading configuration data out of `req` and updating `resp` with diagnostics. +func (v lengthValidator) Validate(ctx context.Context, req tfsdk.ValidateAttributeRequest, resp *tfsdk.ValidateAttributeResponse) { + if req.AttributeConfig.IsUnknown() || req.AttributeConfig.IsNull() { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + v.Description(ctx), + "Value must be set", + ) + return + } + + if length := len(req.AttributeConfig.String()); length < v.min || length > v.max { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + v.Description(ctx), + fmt.Sprintf("Length should be between %d and %d", v.min, v.max), + ) + return + } +} + +// Length returns an AttributeValidator which ensures that any configured +// attribute value: +// +// - Has a length between min and max. +func Length(min, max int) tfsdk.AttributeValidator { + return lengthValidator{min: min, max: max} +} diff --git a/ec/internal/validators/notempty.go b/ec/internal/validators/notempty.go new file mode 100644 index 000000000..d7cd5feb4 --- /dev/null +++ b/ec/internal/validators/notempty.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package validators + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +type notEmptyValidator struct{} + +// Description returns a plain text description of the validator's behavior, suitable for a practitioner to understand its impact. +func (v notEmptyValidator) Description(ctx context.Context) string { + return "Value must not be empty" +} + +// MarkdownDescription returns a markdown formatted description of the validator's behavior, suitable for a practitioner to understand its impact. +func (v notEmptyValidator) MarkdownDescription(ctx context.Context) string { + return v.Description(ctx) +} + +// Validate runs the main validation logic of the validator, reading configuration data out of `req` and updating `resp` with diagnostics. 
+func (v notEmptyValidator) Validate(ctx context.Context, req tfsdk.ValidateAttributeRequest, resp *tfsdk.ValidateAttributeResponse) { + if req.AttributeConfig.IsUnknown() || req.AttributeConfig.IsNull() || req.AttributeConfig.String() == "" { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + v.Description(ctx), + "Value must be set and not empty", + ) + return + } +} + +// NotEmpty returns an AttributeValidator which ensures that any configured +// attribute value: +// +// - Is known. +// - Is set. +// - Is no empty string. +func NotEmpty() tfsdk.AttributeValidator { + return notEmptyValidator{} +} diff --git a/ec/internal/validators/oneOf.go b/ec/internal/validators/oneOf.go new file mode 100644 index 000000000..74f864321 --- /dev/null +++ b/ec/internal/validators/oneOf.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package validators + +import ( + "context" + "fmt" + + "github.com/elastic/cloud-sdk-go/pkg/util/slice" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +type oneOf struct { + values []string +} + +// Description returns a plain text description of the validator's behavior, suitable for a practitioner to understand its impact. 
+func (v oneOf) Description(ctx context.Context) string { + return "Value must not be empty" +} + +// MarkdownDescription returns a markdown formatted description of the validator's behavior, suitable for a practitioner to understand its impact. +func (v oneOf) MarkdownDescription(ctx context.Context) string { + return v.Description(ctx) +} + +// Validate runs the main validation logic of the validator, reading configuration data out of `req` and updating `resp` with diagnostics. +func (v oneOf) Validate(ctx context.Context, req tfsdk.ValidateAttributeRequest, resp *tfsdk.ValidateAttributeResponse) { + if req.AttributeConfig.IsNull() || req.AttributeConfig.IsUnknown() { + return + } + + if value := req.AttributeConfig.String(); !slice.HasString(v.values, value) { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + v.Description(ctx), + fmt.Sprintf("%v - invalid extension type %s: accepted values are %v", req.AttributePath, value, v.values), + ) + return + } +} + +// OneOf returns an AttributeValidator which ensures that any configured +// attribute value: +// +// - Is one of the accepted values. 
+func OneOf(values []string) tfsdk.AttributeValidator { + return oneOf{values: values} +} diff --git a/ec/provider.go b/ec/provider.go index 3c243cf07..ac765fad8 100644 --- a/ec/provider.go +++ b/ec/provider.go @@ -33,12 +33,11 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/terraform-provider-ec/ec/ecresource/elasticsearchkeystoreresource" - "github.com/elastic/terraform-provider-ec/ec/ecdatasource/deploymentdatasource" "github.com/elastic/terraform-provider-ec/ec/ecdatasource/deploymentsdatasource" "github.com/elastic/terraform-provider-ec/ec/ecdatasource/stackdatasource" "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource" + "github.com/elastic/terraform-provider-ec/ec/ecresource/elasticsearchkeystoreresource" "github.com/elastic/terraform-provider-ec/ec/ecresource/extensionresource" "github.com/elastic/terraform-provider-ec/ec/ecresource/trafficfilterassocresource" "github.com/elastic/terraform-provider-ec/ec/ecresource/trafficfilterresource" @@ -71,18 +70,6 @@ var ( defaultTimeout = 40 * time.Second ) -// LegacyProvider returns a schema.Provider. -func LegacyProvider() *schema.Provider { - return &schema.Provider{ - ConfigureContextFunc: configureAPI, - Schema: newSchema(), - DataSourcesMap: map[string]*schema.Resource{}, - ResourcesMap: map[string]*schema.Resource{ - "ec_deployment": deploymentresource.Resource(), - }, - } -} - func newSchema() map[string]*schema.Schema { // This schema must match exactly the Terraform Protocol v6 (Terraform Plugin Framework) provider's schema. // Notably the attributes can have no Default values. 
@@ -170,6 +157,7 @@ func (p *Provider) Resources(ctx context.Context) []func() resource.Resource { return []func() resource.Resource{ func() resource.Resource { return &elasticsearchkeystoreresource.Resource{} }, func() resource.Resource { return &extensionresource.Resource{} }, + func() resource.Resource { return &deploymentresource.Resource{} }, func() resource.Resource { return &trafficfilterresource.Resource{} }, func() resource.Resource { return &trafficfilterassocresource.Resource{} }, } diff --git a/ec/provider_config.go b/ec/provider_config.go index 1be54bfab..9816c1f80 100644 --- a/ec/provider_config.go +++ b/ec/provider_config.go @@ -18,13 +18,11 @@ package ec import ( - "context" "fmt" "net/http" "os" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/elastic/cloud-sdk-go/pkg/api" @@ -42,19 +40,6 @@ var ( DefaultHTTPRetries = 2 ) -func configureAPI(_ context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { - cfg, err := newAPIConfigLegacy(d) - if err != nil { - return nil, diag.FromErr(err) - } - client, err := api.NewAPI(cfg) - if err != nil { - return nil, diag.FromErr(err) - } - - return client, nil -} - func newAPIConfigLegacy(d *schema.ResourceData) (api.Config, error) { endpoint := util.GetStringFromSchemaOrEnv(d, "endpoint", []string{"EC_ENDPOINT", "EC_HOST"}, api.ESSEndpoint) apiKey := util.GetStringFromSchemaOrEnv(d, "apikey", []string{"EC_API_KEY"}, "") diff --git a/ec/version.go b/ec/version.go index e00612ef5..eb125514b 100644 --- a/ec/version.go +++ b/ec/version.go @@ -18,4 +18,4 @@ package ec // Version contains the current terraform provider version. 
-const Version = "0.5.0-dev" +const Version = "0.6.0-dev" diff --git a/examples/deployment/deployment.tf b/examples/deployment/deployment.tf index c0f5864be..af7ef098d 100644 --- a/examples/deployment/deployment.tf +++ b/examples/deployment/deployment.tf @@ -6,7 +6,7 @@ terraform { required_providers { ec = { source = "elastic/ec" - version = "0.5.0" + version = "0.6.0" } } } @@ -28,23 +28,23 @@ resource "ec_deployment" "example_minimal" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch { - config { + elasticsearch = { + + hot = { + autoscaling = {} + } + config = { user_settings_yaml = file("./es_settings.yaml") } } - kibana {} + kibana = {} - enterprise_search { - topology { - zone_count = 1 - } + enterprise_search = { + zone_count = 1 } - apm { - topology { - size = "0.5g" - } + apm = { + size = "0.5g" } } \ No newline at end of file diff --git a/examples/deployment_ccs/deployment.tf b/examples/deployment_ccs/deployment.tf index 7efd01323..26e9b7abb 100644 --- a/examples/deployment_ccs/deployment.tf +++ b/examples/deployment_ccs/deployment.tf @@ -4,7 +4,7 @@ terraform { required_providers { ec = { source = "elastic/ec" - version = "0.5.0" + version = "0.6.0" } } } @@ -24,11 +24,12 @@ resource "ec_deployment" "source_deployment" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch { - topology { - id = "hot_content" - zone_count = 1 - size = "2g" + elasticsearch = { + config = {} + hot = { + zone_count = 1 + size = "2g" + autoscaling = {} } } } @@ -40,11 +41,12 @@ resource "ec_deployment" "second_source" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch { - topology { - id = "hot_content" - zone_count = 1 - size = "2g" + elasticsearch = { + config = {} + hot = { + zone_count = 1 + size = "2g" + autoscaling = {} } } } @@ -56,19 +58,25 @@ resource "ec_deployment" "ccs" { version = data.ec_stack.latest.version 
deployment_template_id = "aws-cross-cluster-search-v2" - elasticsearch { - remote_cluster { - deployment_id = ec_deployment.source_deployment.id - alias = ec_deployment.source_deployment.name - ref_id = ec_deployment.source_deployment.elasticsearch.0.ref_id + elasticsearch = { + config = {} + hot = { + autoscaling = {} } - remote_cluster { - deployment_id = ec_deployment.second_source.id - alias = ec_deployment.second_source.name - ref_id = ec_deployment.second_source.elasticsearch.0.ref_id - } + remote_cluster = [ + { + deployment_id = ec_deployment.source_deployment.id + alias = ec_deployment.source_deployment.name + ref_id = ec_deployment.source_deployment.elasticsearch.0.ref_id + }, + { + deployment_id = ec_deployment.second_source.id + alias = ec_deployment.second_source.name + ref_id = ec_deployment.second_source.elasticsearch.0.ref_id + } + ] } - kibana {} + kibana = {} } diff --git a/examples/deployment_ec2_instance/elastic_deployment.tf b/examples/deployment_ec2_instance/elastic_deployment.tf index f6f9073b2..8ac2a3a04 100644 --- a/examples/deployment_ec2_instance/elastic_deployment.tf +++ b/examples/deployment_ec2_instance/elastic_deployment.tf @@ -16,8 +16,14 @@ resource "ec_deployment" "deployment" { traffic_filter = [ec_deployment_traffic_filter.allow_my_instance.id] # Note the deployment will contain Elasticsearch and Kibana resources with default configurations. - elasticsearch {} - kibana {} + elasticsearch = { + config = {} + hot = { + autoscaling = {} + } + } + + kibana = {} } # Create a traffic filter to allow the instance's public IP address to access our deployment. 
diff --git a/examples/deployment_ec2_instance/provider.tf b/examples/deployment_ec2_instance/provider.tf index 2ff54e659..b14560c42 100644 --- a/examples/deployment_ec2_instance/provider.tf +++ b/examples/deployment_ec2_instance/provider.tf @@ -4,7 +4,7 @@ terraform { required_providers { ec = { source = "elastic/ec" - version = "0.5.0" + version = "0.6.0" } aws = { diff --git a/examples/deployment_with_init/deployment.tf b/examples/deployment_with_init/deployment.tf index 28892498e..0a7576fa1 100644 --- a/examples/deployment_with_init/deployment.tf +++ b/examples/deployment_with_init/deployment.tf @@ -14,17 +14,16 @@ resource "ec_deployment" "example_minimal" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" traffic_filter = [ec_deployment_traffic_filter.allow_all.id] - elasticsearch { - topology { - id = "hot_content" - size = "8g" + elasticsearch = { + config = {} + hot = { + size = "8g" + autoscaling = {} } } - kibana { - topology { - size = "1g" - } + kibana = { + size = "1g" } } diff --git a/examples/deployment_with_init/provider.tf b/examples/deployment_with_init/provider.tf index c4282ff46..485098007 100644 --- a/examples/deployment_with_init/provider.tf +++ b/examples/deployment_with_init/provider.tf @@ -4,7 +4,7 @@ terraform { required_providers { ec = { source = "elastic/ec" - version = "0.5.0" + version = "0.6.0" } } } diff --git a/examples/extension_bundle/extension.tf b/examples/extension_bundle/extension.tf index e1f1f903a..71047427b 100644 --- a/examples/extension_bundle/extension.tf +++ b/examples/extension_bundle/extension.tf @@ -4,7 +4,7 @@ terraform { required_providers { ec = { source = "elastic/ec" - version = "0.5.0" + version = "0.6.0" } } } diff --git a/go.mod b/go.mod index 5ea92a68b..72d8cfa11 100644 --- a/go.mod +++ b/go.mod @@ -3,17 +3,18 @@ module github.com/elastic/terraform-provider-ec go 1.19 require ( + github.com/blang/semver v3.5.1+incompatible github.com/blang/semver/v4 v4.0.0 
github.com/elastic/cloud-sdk-go v1.10.0 - github.com/go-openapi/runtime v0.24.1 + github.com/go-openapi/runtime v0.24.2 github.com/go-openapi/strfmt v0.21.3 - github.com/hashicorp/terraform-plugin-framework v0.13.0 + github.com/hashicorp/terraform-plugin-framework v0.14.0 github.com/hashicorp/terraform-plugin-framework-validators v0.5.0 - github.com/hashicorp/terraform-plugin-go v0.14.0 - github.com/hashicorp/terraform-plugin-mux v0.7.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.23.0 - github.com/stretchr/testify v1.8.0 - golang.org/x/exp v0.0.0-20220921164117-439092de6870 + github.com/hashicorp/terraform-plugin-go v0.14.1 + github.com/hashicorp/terraform-plugin-log v0.7.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 + github.com/stretchr/testify v1.8.1 + golang.org/x/exp v0.0.0-20221012211006-4de253d81b95 ) require ( @@ -39,15 +40,14 @@ require ( github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect github.com/hashicorp/go-hclog v1.3.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.4.5 // indirect + github.com/hashicorp/go-plugin v1.4.6 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hc-install v0.4.0 // indirect - github.com/hashicorp/hcl/v2 v2.14.0 // indirect + github.com/hashicorp/hcl/v2 v2.15.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.17.3 // indirect github.com/hashicorp/terraform-json v0.14.0 // indirect - github.com/hashicorp/terraform-plugin-log v0.7.0 // indirect github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c // indirect github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect github.com/hashicorp/yamux v0.1.1 // indirect @@ -67,15 +67,15 @@ require ( github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/vmihailenco/msgpack/v4 v4.3.12 // 
indirect github.com/vmihailenco/tagparser v0.1.2 // indirect - github.com/zclconf/go-cty v1.11.0 // indirect - go.mongodb.org/mongo-driver v1.10.2 // indirect - golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 // indirect - golang.org/x/net v0.0.0-20220921203646-d300de134e69 // indirect - golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect + github.com/zclconf/go-cty v1.12.1 // indirect + go.mongodb.org/mongo-driver v1.10.0 // indirect + golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect + golang.org/x/net v0.0.0-20220708220712-1185a9018129 // indirect + golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737 // indirect - google.golang.org/grpc v1.49.0 // indirect + google.golang.org/genproto v0.0.0-20200711021454-869866162049 // indirect + google.golang.org/grpc v1.50.1 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 6946a79c1..a175ed3dd 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,4 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -31,8 +32,12 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:W github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= 
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -44,6 +49,8 @@ github.com/elastic/cloud-sdk-go v1.10.0 h1:1WBUkP71ogoxynWfaGg5Bm8Z36F4tL3bjiu+e github.com/elastic/cloud-sdk-go v1.10.0/go.mod h1:BMx5iwmVwL8gpomLSMPI6gcvfWzrV4KsWSnbPlWwlrI= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -120,8 +127,8 @@ github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2g github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= github.com/go-openapi/runtime v0.19.24/go.mod 
h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= github.com/go-openapi/runtime v0.23.0/go.mod h1:aQg+kaIQEn+A2CRSY1TxbM8+sT9g2V3aLc1FbIAnbbs= -github.com/go-openapi/runtime v0.24.1 h1:Sml5cgQKGYQHF+M7yYSHaH1eOjvTykrddTE/KtQVjqo= -github.com/go-openapi/runtime v0.24.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= +github.com/go-openapi/runtime v0.24.2 h1:yX9HMGQbz32M87ECaAhGpJjBmErO3QLcgdZj9BzGx7c= +github.com/go-openapi/runtime v0.24.2/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= @@ -203,10 +210,19 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod 
h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -214,6 +230,7 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -237,8 +254,8 @@ github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9Dq github.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.5 h1:oTE/oQR4eghggRg8VY7PAz3dr++VwDNBGCcOfIvHpBo= -github.com/hashicorp/go-plugin v1.4.5/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= 
+github.com/hashicorp/go-plugin v1.4.6 h1:MDV3UrKQBM3du3G7MApDGvOsMYy3JQJ4exhSoKBAeVA= +github.com/hashicorp/go-plugin v1.4.6/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -248,26 +265,24 @@ github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mO github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hc-install v0.4.0 h1:cZkRFr1WVa0Ty6x5fTvL1TuO1flul231rWkGH92oYYk= github.com/hashicorp/hc-install v0.4.0/go.mod h1:5d155H8EC5ewegao9A4PUTMNPZaq+TbOzkJJZ4vrXeI= -github.com/hashicorp/hcl/v2 v2.14.0 h1:jX6+Q38Ly9zaAJlAjnFVyeNSNCKKW8D0wvyg7vij5Wc= -github.com/hashicorp/hcl/v2 v2.14.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= +github.com/hashicorp/hcl/v2 v2.15.0 h1:CPDXO6+uORPjKflkWCCwoWc9uRp+zSIPcCQ+BrxV7m8= +github.com/hashicorp/hcl/v2 v2.15.0/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjlaclkx3eErU= github.com/hashicorp/terraform-exec v0.17.3/go.mod h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI= github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= -github.com/hashicorp/terraform-plugin-framework v0.13.0 h1:tGnqttzZwU3FKc+HasHr2Yi5L81FcQbdc8zQhbBD9jQ= -github.com/hashicorp/terraform-plugin-framework v0.13.0/go.mod h1:wcZdk4+Uef6Ng+BiBJjGAcIPlIs5bhlEV/TA1k6Xkq8= 
+github.com/hashicorp/terraform-plugin-framework v0.14.0 h1:Mwj55u+Jc/QGM6fLBPCe1P+ZF3cuYs6wbCdB15lx/Dg= +github.com/hashicorp/terraform-plugin-framework v0.14.0/go.mod h1:wcZdk4+Uef6Ng+BiBJjGAcIPlIs5bhlEV/TA1k6Xkq8= github.com/hashicorp/terraform-plugin-framework-validators v0.5.0 h1:eD79idhnJOBajkUMEbm0c8dOyOb/F49STbUEVojT6F4= github.com/hashicorp/terraform-plugin-framework-validators v0.5.0/go.mod h1:NfGgclDM3FZqvNVppPKE2aHI1JAyT002ypPRya7ch3I= -github.com/hashicorp/terraform-plugin-go v0.14.0 h1:ttnSlS8bz3ZPYbMb84DpcPhY4F5DsQtcAS7cHo8uvP4= -github.com/hashicorp/terraform-plugin-go v0.14.0/go.mod h1:2nNCBeRLaenyQEi78xrGrs9hMbulveqG/zDMQSvVJTE= +github.com/hashicorp/terraform-plugin-go v0.14.1 h1:cwZzPYla82XwAqpLhSzdVsOMU+6H29tczAwrB0z9Zek= +github.com/hashicorp/terraform-plugin-go v0.14.1/go.mod h1:Bc/K6K26BQ2FHqIELPbpKtt2CzzbQou+0UQF3/0NsCQ= github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs= github.com/hashicorp/terraform-plugin-log v0.7.0/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= -github.com/hashicorp/terraform-plugin-mux v0.7.0 h1:wRbSYzg+v2sn5Mdee0UKm4YTt4wJG0LfSwtgNuBkglY= -github.com/hashicorp/terraform-plugin-mux v0.7.0/go.mod h1:Ae30Mc5lz4d1awtiCbHP0YyvgBeiQ00Q1nAq0U3lb+I= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.23.0 h1:D4EeQm0piYXIHp6ZH3zjyP2Elq6voC64x3GZptaiefA= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.23.0/go.mod h1:xkJGavPvP9kYS/VbiW8o7JuTNgPwm7Tiw/Ie/b46r4c= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 h1:zHcMbxY0+rFO9gY99elV/XC/UnQVg7FhRCbj1i5b7vM= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1/go.mod h1:+tNlb0wkfdsDJ7JEiERLz4HzM19HyiuIoGzTsM7rPpw= github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c h1:D8aRO6+mTqHfLsK/BC3j5OAoogv1WLRWzY1AaTo3rBg= github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c/go.mod h1:Wn3Na71knbXc1G8Lh+yu/dQWWJeFQEpDeJMtWMtlmNI= github.com/hashicorp/terraform-svchost 
v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= @@ -359,6 +374,7 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -378,6 +394,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -385,8 +402,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0 
h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= @@ -411,8 +429,8 @@ github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7Jul github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.11.0 h1:726SxLdi2SDnjY+BStqB9J1hNp4+2WlzyXLuimibIe0= -github.com/zclconf/go-cty v1.11.0/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= +github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= +github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -426,9 +444,8 @@ go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R7 go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.8.2/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.mongodb.org/mongo-driver v1.10.0 
h1:UtV6N5k14upNp4LTduX0QCufG124fSu25Wz9tu94GLg= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.10.2 h1:4Wk3cnqOrQCn0P92L3/mmurMxzdvWWs5J9jinAVKD+k= -go.mongodb.org/mongo-driver v1.10.2/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -443,15 +460,20 @@ golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 h1:a5Yg6ylndHHYJqIPrdq0AhvR6KTvDTAvgBtaidhEevY= -golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20220921164117-439092de6870 h1:j8b6j9gzSigH28O5SjSpQSSh9lFd6f5D/q0aHjNTulc= -golang.org/x/exp v0.0.0-20220921164117-439092de6870/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20221012211006-4de253d81b95 h1:sBdrWpxhGDdTAYNqbgBLAR+ULAPPhfgncLr1X0lyWtg= +golang.org/x/exp v0.0.0-20221012211006-4de253d81b95/go.mod 
h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -473,16 +495,19 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220921203646-d300de134e69 h1:hUJpGDpnfwdJW8iNypFjmSY0sCBEL+spFTZ2eO+Sfps= -golang.org/x/net v0.0.0-20220921203646-d300de134e69/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net 
v0.0.0-20220708220712-1185a9018129 h1:vucSRfWwTsoXro7P+3Cjlr6flUMtzCwzlvkxEQtHHB0= +golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -511,9 +536,8 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 h1:h+EGohizhe9XlX18rfpa8k8RAc5XyaeamM+0VHRd4lc= -golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= @@ -528,10 +552,14 @@ golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -541,10 +569,24 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737 h1:K1zaaMdYBXRyX+cwFnxj7M6zwDyumLQMZ5xqwGvjreQ= -google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737/go.mod h1:2r/26NEF3bFmT3eC3aZreahSal0C3Shl8Gi6vyDYqOQ= -google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200711021454-869866162049 h1:YFTFpQhgvrLrmxtiIncJxFXeCyq84ixuKWVCaCAi9Oc= +google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= @@ -571,3 +613,5 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/main.go b/main.go index f265a1c85..832b96f9e 100644 --- a/main.go +++ b/main.go @@ -24,11 +24,8 @@ import ( "github.com/elastic/terraform-provider-ec/ec" + "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/providerserver" - "github.com/hashicorp/terraform-plugin-go/tfprotov6" - "github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server" - "github.com/hashicorp/terraform-plugin-mux/tf5to6server" - "github.com/hashicorp/terraform-plugin-mux/tf6muxserver" ) //go:generate go run ./gen/gen.go @@ -37,41 +34,17 @@ import ( const ProviderAddr = "registry.terraform.io/elastic/ec" func main() { - debugFlag := flag.Bool("debug", false, "set to true to run the provider with support for debuggers like delve") - flag.Parse() - - upgradedSdkProvider, err := tf5to6server.UpgradeServer( - context.Background(), - ec.LegacyProvider().GRPCProvider, - ) - - if err != nil { - log.Fatal(err) - } - - ctx := context.Background() - providers := []func() tfprotov6.ProviderServer{ - func() tfprotov6.ProviderServer { return upgradedSdkProvider }, - providerserver.NewProtocol6(ec.New(ec.Version)), - } - - muxServer, err := tf6muxserver.NewMuxServer(ctx, providers...) 
+ var debug bool - if err != nil { - log.Fatal(err) - } - - var serveOpts []tf6server.ServeOpt + flag.BoolVar(&debug, "debug", false, "set to true to run the provider with support for debuggers like delve") + flag.Parse() - if *debugFlag { - serveOpts = append(serveOpts, tf6server.WithManagedDebug()) + opts := providerserver.ServeOpts{ + Address: ProviderAddr, + Debug: debug, } - err = tf6server.Serve( - ProviderAddr, - muxServer.ProviderServer, - serveOpts..., - ) + err := providerserver.Serve(context.Background(), func() provider.Provider { return ec.New(ec.Version) }, opts) if err != nil { log.Fatal(err) From 40f4e5d8a25caac83bc4d5c4f63aeafb6c6058c0 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 7 Dec 2022 12:06:01 +0100 Subject: [PATCH 013/104] skip acceptance tests for checking upgrade from 0.4.1 --- ec/acc/deployment_elasticsearch_keystore_test.go | 2 ++ ec/acc/deployment_extension_basic_test.go | 2 ++ ec/acc/deployment_traffic_filter_association_test.go | 2 ++ ec/acc/deployment_traffic_filter_test.go | 2 ++ 4 files changed, 8 insertions(+) diff --git a/ec/acc/deployment_elasticsearch_keystore_test.go b/ec/acc/deployment_elasticsearch_keystore_test.go index 4ab06ad71..9d45d960b 100644 --- a/ec/acc/deployment_elasticsearch_keystore_test.go +++ b/ec/acc/deployment_elasticsearch_keystore_test.go @@ -119,6 +119,8 @@ func TestAccDeploymentElasticsearchKeystore_full(t *testing.T) { } func TestAccDeploymentElasticsearchKeystore_UpgradeFrom0_4_1(t *testing.T) { + t.Skip("skip until `ec_deployment` state upgrade is implemented") + resType := "ec_deployment_elasticsearch_keystore" firstResName := resType + ".test" secondResName := resType + ".gcs_creds" diff --git a/ec/acc/deployment_extension_basic_test.go b/ec/acc/deployment_extension_basic_test.go index c0fdfc5a0..84a8483ec 100644 --- a/ec/acc/deployment_extension_basic_test.go +++ b/ec/acc/deployment_extension_basic_test.go @@ -66,6 +66,8 @@ func TestAccDeploymentExtension_basic(t *testing.T) { } func 
TestAccDeploymentExtension_UpgradeFrom0_4_1(t *testing.T) { + t.Skip("skip until `ec_deployment` state upgrade is implemented") + resName := "ec_deployment_extension.my_extension" randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) diff --git a/ec/acc/deployment_traffic_filter_association_test.go b/ec/acc/deployment_traffic_filter_association_test.go index ac102c260..e279f5a13 100644 --- a/ec/acc/deployment_traffic_filter_association_test.go +++ b/ec/acc/deployment_traffic_filter_association_test.go @@ -73,6 +73,8 @@ func TestAccDeploymentTrafficFilterAssociation_basic(t *testing.T) { } func TestAccDeploymentTrafficFilterAssociation_UpgradeFrom0_4_1(t *testing.T) { + t.Skip("skip until `ec_deployment` state upgrade is implemented") + resName := "ec_deployment_traffic_filter.tf_assoc" resAssocName := "ec_deployment_traffic_filter_association.tf_assoc" randomName := acctest.RandomWithPrefix(prefix) diff --git a/ec/acc/deployment_traffic_filter_test.go b/ec/acc/deployment_traffic_filter_test.go index 46ca08f84..c22898b8d 100644 --- a/ec/acc/deployment_traffic_filter_test.go +++ b/ec/acc/deployment_traffic_filter_test.go @@ -114,6 +114,8 @@ func TestAccDeploymentTrafficFilter_azure(t *testing.T) { } func TestAccDeploymentTrafficFilter_UpgradeFrom0_4_1(t *testing.T) { + t.Skip("skip until `ec_deployment` state upgrade is implemented") + resName := "ec_deployment_traffic_filter.basic" randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) startCfg := "testdata/deployment_traffic_filter_basic.tf" From 0f9eab6af99da64450f67d6c985b58009f5eda3b Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 7 Dec 2022 15:39:33 +0100 Subject: [PATCH 014/104] remove leftovers that came after 0.4.1 --- .../deployment_elasticsearch_keystore_test.go | 7 -- ec/acc/deployment_template_migration_test.go | 96 --------------- .../v2/elasticsearch_payload_test.go | 109 ------------------ .../v2/observability_payload_test.go | 12 -- 
.../trafficfilterresource/expanders_test.go | 30 ----- ec/provider.go | 1 - 6 files changed, 255 deletions(-) delete mode 100644 ec/acc/deployment_template_migration_test.go diff --git a/ec/acc/deployment_elasticsearch_keystore_test.go b/ec/acc/deployment_elasticsearch_keystore_test.go index 979d42fda..9d45d960b 100644 --- a/ec/acc/deployment_elasticsearch_keystore_test.go +++ b/ec/acc/deployment_elasticsearch_keystore_test.go @@ -32,7 +32,6 @@ func TestAccDeploymentElasticsearchKeystore_full(t *testing.T) { var previousID, currentID string resType := "ec_deployment_elasticsearch_keystore" - deploymentResName := "ec_deployment.keystore" firstResName := resType + ".test" secondResName := resType + ".gcs_creds" randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) @@ -67,8 +66,6 @@ func TestAccDeploymentElasticsearchKeystore_full(t *testing.T) { resource.TestCheckResourceAttr(secondResName, "value", "{\n \"type\": \"service_account\",\n \"project_id\": \"project-id\",\n \"private_key_id\": \"key-id\",\n \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nprivate-key\\n-----END PRIVATE KEY-----\\n\",\n \"client_email\": \"service-account-email\",\n \"client_id\": \"client-id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/service-account-email\"\n}"), resource.TestCheckResourceAttr(secondResName, "as_file", "false"), resource.TestCheckResourceAttrSet(secondResName, "deployment_id"), - - checkExpectedKeystoreKeysExist(deploymentResName, "xpack.notification.slack.account.hello.secure_url", "gcs.client.secondary.credentials_file"), ), }, { @@ -85,8 +82,6 @@ func TestAccDeploymentElasticsearchKeystore_full(t *testing.T) { resource.TestCheckResourceAttr(secondResName, "value", "{\n \"type\": 
\"service_account\",\n \"project_id\": \"project-id\",\n \"private_key_id\": \"key-id\",\n \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nprivate-key\\n-----END PRIVATE KEY-----\\n\",\n \"client_email\": \"service-account-email\",\n \"client_id\": \"client-id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/service-account-email\"\n}"), resource.TestCheckResourceAttr(secondResName, "as_file", "false"), resource.TestCheckResourceAttrSet(secondResName, "deployment_id"), - - checkExpectedKeystoreKeysExist(deploymentResName, "xpack.notification.slack.account.hello.secure_url", "gcs.client.secondary.credentials_file"), ), }, { @@ -103,8 +98,6 @@ func TestAccDeploymentElasticsearchKeystore_full(t *testing.T) { resource.TestCheckResourceAttr(secondResName, "value", "{\n \"type\": \"service_account\",\n \"project_id\": \"project-id\",\n \"private_key_id\": \"key-id\",\n \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nprivate-key\\n-----END PRIVATE KEY-----\\n\",\n \"client_email\": \"service-account-email\",\n \"client_id\": \"client-id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/service-account-email\"\n}"), resource.TestCheckResourceAttr(secondResName, "as_file", "false"), resource.TestCheckResourceAttrSet(secondResName, "deployment_id"), - - checkExpectedKeystoreKeysExist(deploymentResName, "xpack.notification.slack.account.hello.secure_urla", "gcs.client.secondary.credentials_file"), ), }, { diff --git a/ec/acc/deployment_template_migration_test.go 
b/ec/acc/deployment_template_migration_test.go deleted file mode 100644 index 9b01f3d9d..000000000 --- a/ec/acc/deployment_template_migration_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package acc - -import ( - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" -) - -func TestAccDeployment_template_migration(t *testing.T) { - resName := "ec_deployment.compute_optimized" - randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - basicCfg := "testdata/deployment_compute_optimized_1.tf" - region := getRegion() - cfg := fixtureAccDeploymentResourceBasicDefaults(t, basicCfg, randomName, region, computeOpTemplate) - secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, basicCfg, randomName, region, memoryOpTemplate) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, - Steps: []resource.TestStep{ - { - // Create a Compute Optimized deployment with the default settings. 
- Config: cfg, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "deployment_template_id", setDefaultTemplate(region, computeOpTemplate)), - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), - ), - }, - { - // Change the deployment to memory optimized - Config: secondConfigCfg, - Check: resource.ComposeAggregateTestCheckFunc( - 
resource.TestCheckResourceAttr(resName, "deployment_template_id", setDefaultTemplate(region, memoryOpTemplate)), - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), - ), - }, - }, - }) -} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go index 
3084ee4aa..88971b57a 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go @@ -574,115 +574,6 @@ func Test_writeElasticsearch(t *testing.T) { }, }), }, - { - name: "parses an ES resource with explicit nils", - args: args{ - dt: hotWarmTpl770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "deployment_template_id": "aws-hot-warm-v2", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": nil, - }}, - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "size": nil, - "zone_count": 1, - }, - map[string]interface{}{ - "id": "warm", - "size": "2g", - "zone_count": nil, - }, - }, - }, - }, - }, - want: enrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ - Region: ec.String("some-region"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - Curation: nil, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - Curation: nil, - UserSettingsYaml: "", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-hot-warm-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "hot", - }, - }, - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ 
- Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d2", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(false), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, { name: "parses an ES resource without a topology (HotWarm)", args: args{ diff --git a/ec/ecresource/deploymentresource/observability/v2/observability_payload_test.go b/ec/ecresource/deploymentresource/observability/v2/observability_payload_test.go index bbba52abe..f4fd06fd7 100644 --- a/ec/ecresource/deploymentresource/observability/v2/observability_payload_test.go +++ b/ec/ecresource/deploymentresource/observability/v2/observability_payload_test.go @@ -70,18 +70,6 @@ func Test_observabilityPayload(t *testing.T) { }, }, }, - { - name: "handles explicit nils", - args: args{ - v: []interface{}{map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "metrics": nil, - "logs": nil, - }}, - }, - want: &models.DeploymentObservabilitySettings{}, - }, { name: "expands all observability settings", args: args{ diff --git a/ec/ecresource/trafficfilterresource/expanders_test.go b/ec/ecresource/trafficfilterresource/expanders_test.go index 108bfb76d..d788eb302 100644 --- a/ec/ecresource/trafficfilterresource/expanders_test.go +++ 
b/ec/ecresource/trafficfilterresource/expanders_test.go @@ -117,36 +117,6 @@ func Test_expandModel(t *testing.T) { }, }, }, - { - name: "parses an privatelink resource with explicit nils", - args: args{d: util.NewResourceData(t, util.ResDataParams{ - ID: "some-random-id", - State: map[string]interface{}{ - "name": "my traffic filter", - "type": "azure_private_endpoint", - "include_by_default": false, - "region": "azure-australiaeast", - "rule": []interface{}{map[string]interface{}{ - "description": nil, - "azure_endpoint_guid": "1231312-1231-1231-1231-1231312", - "azure_endpoint_name": "my-azure-pl", - }}, - }, - Schema: newSchema(), - })}, - want: &models.TrafficFilterRulesetRequest{ - Name: ec.String("my traffic filter"), - Type: ec.String("azure_private_endpoint"), - IncludeByDefault: ec.Bool(false), - Region: ec.String("azure-australiaeast"), - Rules: []*models.TrafficFilterRule{ - { - AzureEndpointGUID: "1231312-1231-1231-1231-1231312", - AzureEndpointName: "my-azure-pl", - }, - }, - }, - }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/ec/provider.go b/ec/provider.go index cf6c69b2c..ac765fad8 100644 --- a/ec/provider.go +++ b/ec/provider.go @@ -35,7 +35,6 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/terraform-provider-ec/ec/ecdatasource/deploymentdatasource" "github.com/elastic/terraform-provider-ec/ec/ecdatasource/deploymentsdatasource" - "github.com/elastic/terraform-provider-ec/ec/ecdatasource/privatelinkdatasource" "github.com/elastic/terraform-provider-ec/ec/ecdatasource/stackdatasource" "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource" "github.com/elastic/terraform-provider-ec/ec/ecresource/elasticsearchkeystoreresource" From a52f732597de811bfa0ae855df4ac81bcbec9ef8 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 7 Dec 2022 16:32:18 +0100 Subject: [PATCH 015/104] update go.mod --- go.mod | 2 -- go.sum | 8 +++----- 2 files changed, 3 insertions(+), 7 
deletions(-) diff --git a/go.mod b/go.mod index 8583331ac..72d8cfa11 100644 --- a/go.mod +++ b/go.mod @@ -69,8 +69,6 @@ require ( github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/zclconf/go-cty v1.12.1 // indirect go.mongodb.org/mongo-driver v1.10.0 // indirect - go.opentelemetry.io/otel v1.11.1 // indirect - go.opentelemetry.io/otel/trace v1.11.1 // indirect golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect golang.org/x/net v0.0.0-20220708220712-1185a9018129 // indirect golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect diff --git a/go.sum b/go.sum index 4542d1569..a175ed3dd 100644 --- a/go.sum +++ b/go.sum @@ -66,11 +66,6 @@ github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -357,6 +352,7 @@ github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure 
v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= @@ -447,6 +443,7 @@ go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.8.2/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.10.0 h1:UtV6N5k14upNp4LTduX0QCufG124fSu25Wz9tu94GLg= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -544,6 +541,7 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= From 548a37b9bf5c5afe1634a0b932e77a7cdd9d90ef Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 7 Dec 2022 16:54:50 +0100 Subject: [PATCH 016/104] Update changelog --- CHANGELOG.md | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ef0490875..fd19a559f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,33 @@ -# 0.6.0 (Unreleased) +# 0.7.0 (Unreleased) FEATURES: * resource/deployment: Utilise the template migration API to build the base update request when changing `deployment_template_id`. This results in more reliable changes between deployment templates. ([#547](https://github.com/elastic/terraform-provider-ec/issues/547)) +# 0.6.0 (Unreleased) + +FEATURES: + +Migration to [TF Plugin Framework](https://developer.hashicorp.com/terraform/plugin/framework) + +BREAKING CHANGES: + +New schema for `ec_deployment` + +BUG FIXES: + +[#336](https://github.com/elastic/terraform-provider-ec/issues/336) +[#467](https://github.com/elastic/terraform-provider-ec/issues/467) +[#445](https://github.com/elastic/terraform-provider-ec/issues/445) + +NOTES + +* The migration is based on 0.4.1, so all changes from 0.5.0 are omitted. + +* State upgrade is not yet implemented for `ec_deployment`. + The recommended way to proceed with existing TF resources is [state import](https://developer.hashicorp.com/terraform/cli/import#state-only). + However, this doesn't import user passwords and secret tokens. 
+ # 0.5.0 (Oct 12, 2022) FEATURES: From 50623f92cd413c2d06b176708c019ec94c3c3c28 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 7 Dec 2022 18:20:36 +0100 Subject: [PATCH 017/104] address PR comments in README --- README.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index d160ac2cc..896fa8494 100644 --- a/README.md +++ b/README.md @@ -123,9 +123,9 @@ After doing so, you can navigate to any of our examples in `./examples` and try ### Moving to TF Framework and schema change for `ec_deployment` resource. -v6.0.0 contains migration to [the TF Plugin Framework](https://developer.hashicorp.com/terraform/plugin/framework) and intoducing version 2 for `ec_deployment` resource: +v0.6.0 contains migration to [TF Plugin Framework](https://developer.hashicorp.com/terraform/plugin/framework) and intoduces new schema for `ec_deployment` resource: -- switching to attributes syntax instead of blocks for almost all definitions that used to be blocks. It means that, for example, a definition like `config {}` has to be changed to `config = {}`, e.g. +- switching to attributes syntax instead of blocks for almost all definitions that used to be blocks. It means that, for example, a definition like `elasticsearch {...}` has to be changed to `elasticsearch = {...}`, e.g. ```hcl resource "ec_deployment" "defaults" { @@ -209,7 +209,7 @@ resource "ec_deployment" "defaults" { } ``` -Please note that the configuration explicitly mentions `hot` tier and the tier has `autoscaling` and `config` attributes even despite the fact that they are empty. If they were omitted, TF (at least up to version 1.3.3) could complain `Error: Provider produced inconsistent result after apply`. +Please note that the snippet explicitly mentions `hot` tier with `autoscaling` attribute even despite the fact that they are empty. - a lot of attributes that used to be collections (e.g. lists and sets) are converted to sigletons, e.g. 
`elasticsearch`, `apm`, `kibana`, `enterprise_search`, `observability`, `topology`, `autoscaling`, etc. Please note that, generally, users are not expected to make any change to their existing configuration to address this particular change (besides moving from block to attribute syntax). All these components used to exist in single instances, so the change is mostly syntactical, taking into account the switch to attributes instead of blocks (otherwise if we kept list for configs, `config {}` had to be rewritten in `config = [{}]` with the move to the attribute syntax). However this change is a breaking one from the schema perspective and requires state upgrade for existing resources that is performed by TF (by calling the provider's API). @@ -227,6 +227,8 @@ There are 2 ways to tackle this - state upgrade that is performed by TF by calling the provider's API so no action is required from user perspective Currently the state upgrade functionality is still in development so importing existing resources is the recommended way to deal with existing TF states. +Please mind the fact that state import doesn't import user passwords and secret tokens that can be the case if your TF modules make use of them. +State upgrade doesn't have this limitation. #### Known issues. @@ -235,4 +237,4 @@ This happens because TF Framework treats all `computed` attributes as `unknown` `ec_deployment` schema contains quite a few of such attributes, so `terraform plan`'s output can be quite big for the resource due to the mentioned reason. However, it doesn't mean that all attributes that marked as `unknown` in the plan will get new values after apply. To mitigitate the problem, the provider uses plan modifiers that is a recommended way to reduce plan output. -However, currently plan modifiers don't cover the all `computed` attributes. +However, currently plan modifiers don't cover all the `computed` attributes. 
From e24409454e2f915cccf3286d80ad2582a9207bcd Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 7 Dec 2022 18:21:05 +0100 Subject: [PATCH 018/104] remove obsolete space --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fd19a559f..01b82e577 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,7 @@ FEATURES: FEATURES: Migration to [TF Plugin Framework](https://developer.hashicorp.com/terraform/plugin/framework) - + BREAKING CHANGES: New schema for `ec_deployment` From 58b16d50e2a1d9dcb46318118aaff1ef701e5f23 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 13 Dec 2022 13:50:32 +0100 Subject: [PATCH 019/104] Address PR comments in provider_config.go --- ec/internal/planmodifier/default_from_env.go | 63 ------ ec/internal/util/helpers.go | 8 +- ec/provider.go | 109 +++------- ec/provider_config.go | 62 ++---- ec/provider_config_test.go | 210 ++++++++----------- 5 files changed, 139 insertions(+), 313 deletions(-) delete mode 100644 ec/internal/planmodifier/default_from_env.go diff --git a/ec/internal/planmodifier/default_from_env.go b/ec/internal/planmodifier/default_from_env.go deleted file mode 100644 index 8a3dcd526..000000000 --- a/ec/internal/planmodifier/default_from_env.go +++ /dev/null @@ -1,63 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package planmodifier - -import ( - "context" - "fmt" - - "github.com/hashicorp/terraform-plugin-framework/tfsdk" - "github.com/hashicorp/terraform-plugin-framework/types" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// defaultFromEnvAttributePlanModifier specifies a default value (attr.Value) for an attribute. -type defaultFromEnvAttributePlanModifier struct { - EnvKeys []string -} - -// DefaultFromEnv is a helper to instantiate a defaultFromEnvAttributePlanModifier. -func DefaultFromEnv(envKeys []string) tfsdk.AttributePlanModifier { - return &defaultFromEnvAttributePlanModifier{envKeys} -} - -var _ tfsdk.AttributePlanModifier = (*defaultFromEnvAttributePlanModifier)(nil) - -func (m *defaultFromEnvAttributePlanModifier) Description(ctx context.Context) string { - return m.MarkdownDescription(ctx) -} - -func (m *defaultFromEnvAttributePlanModifier) MarkdownDescription(ctx context.Context) string { - return fmt.Sprintf("Sets the default value from an environment variable (%v) if the attribute is not set", m.EnvKeys) -} - -func (m *defaultFromEnvAttributePlanModifier) Modify(_ context.Context, req tfsdk.ModifyAttributePlanRequest, res *tfsdk.ModifyAttributePlanResponse) { - // If the attribute configuration is not null, we are done here - if !req.AttributeConfig.IsNull() { - return - } - - // If the attribute plan is "known" and "not null", then a previous plan m in the sequence - // has already been applied, and we don't want to interfere. 
- if !req.AttributePlan.IsUnknown() && !req.AttributePlan.IsNull() { - return - } - - res.AttributePlan = types.String{Value: util.MultiGetenv(m.EnvKeys, "")} -} diff --git a/ec/internal/util/helpers.go b/ec/internal/util/helpers.go index 960ab8c19..8f9c72fc9 100644 --- a/ec/internal/util/helpers.go +++ b/ec/internal/util/helpers.go @@ -80,10 +80,10 @@ func IsCurrentEssPlanEmpty(res *models.EnterpriseSearchResourceInfo) bool { return emptyPlanInfo || res.Info.PlanInfo.Current.Plan == nil } -// MultiGetenv returns the value of the first environment variable in the +// MultiGetenvOrDefault returns the value of the first environment variable in the // given list that has a non-empty value. If none of the environment // variables have a value, the default value is returned. -func MultiGetenv(keys []string, defaultValue string) string { +func MultiGetenvOrDefault(keys []string, defaultValue string) string { for _, key := range keys { if value := os.Getenv(key); value != "" { return value @@ -96,14 +96,14 @@ func GetStringFromSchemaOrEnv(d *schema.ResourceData, key string, envKeys []stri if value, ok := d.GetOk(key); ok { return value.(string) } - return MultiGetenv(envKeys, defaultValue) + return MultiGetenvOrDefault(envKeys, defaultValue) } func GetBoolFromSchemaOrEnv(d *schema.ResourceData, key string, envKeys []string) bool { if value, ok := d.GetOk(key); ok { return value.(bool) } - strValue := MultiGetenv(envKeys, "false") + strValue := MultiGetenvOrDefault(envKeys, "false") value, err := StringToBool(strValue) if err != nil { return false diff --git a/ec/provider.go b/ec/provider.go index ac765fad8..34a75bc3a 100644 --- a/ec/provider.go +++ b/ec/provider.go @@ -29,9 +29,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/elastic/cloud-sdk-go/pkg/api" 
"github.com/elastic/terraform-provider-ec/ec/ecdatasource/deploymentdatasource" "github.com/elastic/terraform-provider-ec/ec/ecdatasource/deploymentsdatasource" @@ -41,7 +38,6 @@ import ( "github.com/elastic/terraform-provider-ec/ec/ecresource/extensionresource" "github.com/elastic/terraform-provider-ec/ec/ecresource/trafficfilterassocresource" "github.com/elastic/terraform-provider-ec/ec/ecresource/trafficfilterresource" - "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" "github.com/elastic/terraform-provider-ec/ec/internal/util" "github.com/elastic/terraform-provider-ec/ec/internal/validators" ) @@ -70,61 +66,6 @@ var ( defaultTimeout = 40 * time.Second ) -func newSchema() map[string]*schema.Schema { - // This schema must match exactly the Terraform Protocol v6 (Terraform Plugin Framework) provider's schema. - // Notably the attributes can have no Default values. - return map[string]*schema.Schema{ - "endpoint": { - Description: fmt.Sprintf(endpointDesc, api.ESSEndpoint), - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.IsURLWithScheme(validURLSchemes), - }, - "apikey": { - Description: apikeyDesc, - Type: schema.TypeString, - Optional: true, - Sensitive: true, - }, - "username": { - Description: usernameDesc, - Type: schema.TypeString, - Optional: true, - }, - "password": { - Description: passwordDesc, - Type: schema.TypeString, - Optional: true, - Sensitive: true, - }, - "insecure": { - Description: insecureDesc, - Type: schema.TypeBool, - Optional: true, - }, - "timeout": { - Description: timeoutDesc, - Type: schema.TypeString, - Optional: true, - }, - "verbose": { - Description: verboseDesc, - Type: schema.TypeBool, - Optional: true, - }, - "verbose_credentials": { - Description: verboseCredsDesc, - Type: schema.TypeBool, - Optional: true, - }, - "verbose_file": { - Description: timeoutDesc, - Type: schema.TypeString, - Optional: true, - }, - } -} - func New(version string) provider.Provider { return 
&Provider{version: version} } @@ -175,11 +116,10 @@ func (p *Provider) GetSchema(context.Context) (tfsdk.Schema, diag.Diagnostics) { Validators: []tfsdk.AttributeValidator{validators.Known(), validators.IsURLWithSchemeValidator(validURLSchemes)}, }, "apikey": { - Description: apikeyDesc, - Type: types.StringType, - Optional: true, - Sensitive: true, - PlanModifiers: []tfsdk.AttributePlanModifier{planmodifier.DefaultFromEnv([]string{"EC_API_KEY"})}, + Description: apikeyDesc, + Type: types.StringType, + Optional: true, + Sensitive: true, }, "username": { Description: usernameDesc, @@ -251,7 +191,7 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, var endpoint string if config.Endpoint.Null { - endpoint = util.MultiGetenv([]string{"EC_ENDPOINT", "EC_HOST"}, api.ESSEndpoint) + endpoint = util.MultiGetenvOrDefault([]string{"EC_ENDPOINT", "EC_HOST"}, api.ESSEndpoint) // TODO We need to validate the endpoint here, similar to how it is done if the value is passed via terraform (isURLWithSchemeValidator) } else { endpoint = config.Endpoint.Value @@ -259,21 +199,21 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, var apiKey string if config.ApiKey.Null { - apiKey = util.MultiGetenv([]string{"EC_API_KEY"}, "") + apiKey = util.MultiGetenvOrDefault([]string{"EC_API_KEY"}, "") } else { apiKey = config.ApiKey.Value } var username string if config.Username.Null { - username = util.MultiGetenv([]string{"EC_USER", "EC_USERNAME"}, "") + username = util.MultiGetenvOrDefault([]string{"EC_USER", "EC_USERNAME"}, "") } else { username = config.Username.Value } var password string if config.Password.Null { - password = util.MultiGetenv([]string{"EC_PASS", "EC_PASSWORD"}, "") + password = util.MultiGetenvOrDefault([]string{"EC_PASS", "EC_PASSWORD"}, "") } else { password = config.Password.Value } @@ -281,7 +221,7 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, var err error var insecure 
bool if config.Insecure.Null { - insecureStr := util.MultiGetenv([]string{"EC_INSECURE", "EC_SKIP_TLS_VALIDATION"}, "") + insecureStr := util.MultiGetenvOrDefault([]string{"EC_INSECURE", "EC_SKIP_TLS_VALIDATION"}, "") if insecure, err = util.StringToBool(insecureStr); err != nil { res.Diagnostics.AddWarning( "Unable to create client", @@ -295,14 +235,14 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, var timeout string if config.Timeout.Null { - timeout = util.MultiGetenv([]string{"EC_TIMEOUT"}, defaultTimeout.String()) + timeout = util.MultiGetenvOrDefault([]string{"EC_TIMEOUT"}, defaultTimeout.String()) } else { timeout = config.Timeout.Value } var verbose bool if config.Verbose.Null { - verboseStr := util.MultiGetenv([]string{"EC_VERBOSE"}, "") + verboseStr := util.MultiGetenvOrDefault([]string{"EC_VERBOSE"}, "") if verbose, err = util.StringToBool(verboseStr); err != nil { res.Diagnostics.AddWarning( "Unable to create client", @@ -316,7 +256,7 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, var verboseCredentials bool if config.VerboseCredentials.Null { - verboseCredentialsStr := util.MultiGetenv([]string{"EC_VERBOSE_CREDENTIALS"}, "") + verboseCredentialsStr := util.MultiGetenvOrDefault([]string{"EC_VERBOSE_CREDENTIALS"}, "") if verboseCredentials, err = util.StringToBool(verboseCredentialsStr); err != nil { res.Diagnostics.AddWarning( "Unable to create client", @@ -330,22 +270,23 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, var verboseFile string if config.VerboseFile.Null { - verboseFile = util.MultiGetenv([]string{"EC_VERBOSE_FILE"}, "request.log") + verboseFile = util.MultiGetenvOrDefault([]string{"EC_VERBOSE_FILE"}, "request.log") } else { verboseFile = config.VerboseFile.Value } - cfg, err := newAPIConfig( - endpoint, - apiKey, - username, - password, - insecure, - timeout, - verbose, - verboseCredentials, - verboseFile, - ) + cfg, err := 
newAPIConfig(apiSetup{ + endpoint: endpoint, + apikey: apiKey, + username: username, + password: password, + insecure: insecure, + timeout: timeout, + verbose: verbose, + verboseCredentials: verboseCredentials, + verboseFile: verboseFile, + }) + if err != nil { res.Diagnostics.AddWarning( "Unable to create api Client config", diff --git a/ec/provider_config.go b/ec/provider_config.go index 9816c1f80..175d4b45c 100644 --- a/ec/provider_config.go +++ b/ec/provider_config.go @@ -23,12 +23,8 @@ import ( "os" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/auth" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" ) const ( @@ -40,52 +36,40 @@ var ( DefaultHTTPRetries = 2 ) -func newAPIConfigLegacy(d *schema.ResourceData) (api.Config, error) { - endpoint := util.GetStringFromSchemaOrEnv(d, "endpoint", []string{"EC_ENDPOINT", "EC_HOST"}, api.ESSEndpoint) - apiKey := util.GetStringFromSchemaOrEnv(d, "apikey", []string{"EC_API_KEY"}, "") - username := util.GetStringFromSchemaOrEnv(d, "username", []string{"EC_USER", "EC_USERNAME"}, "") - password := util.GetStringFromSchemaOrEnv(d, "password", []string{"EC_PASS", "EC_PASSWORD"}, "") - timeout := util.GetStringFromSchemaOrEnv(d, "timeout", []string{"EC_TIMEOUT"}, defaultTimeout.String()) - insecure := util.GetBoolFromSchemaOrEnv(d, "insecure", []string{"EC_INSECURE", "EC_SKIP_TLS_VALIDATION"}) - verbose := util.GetBoolFromSchemaOrEnv(d, "verbose", []string{"EC_VERBOSE"}) - verboseCredentials := util.GetBoolFromSchemaOrEnv(d, "verbose_credentials", []string{"EC_VERBOSE_CREDENTIALS"}) - verboseFile := util.GetStringFromSchemaOrEnv(d, "verbose_file", []string{"EC_VERBOSE_FILE"}, "request.log") - cfg, err := newAPIConfig(endpoint, apiKey, username, password, insecure, timeout, verbose, verboseCredentials, verboseFile) - if err != nil { - return api.Config{}, err - } - return cfg, nil +type apiSetup struct { + 
endpoint string + apikey string + username string + password string + insecure bool + timeout string + verbose bool + verboseCredentials bool + verboseFile string } -func newAPIConfig(endpoint string, - apiKey string, - username string, - password string, - insecure bool, - timeout string, - verbose bool, - verboseCredentials bool, - verboseFile string) (api.Config, error) { +func newAPIConfig(setup apiSetup) (api.Config, error) { + var cfg api.Config - timeoutDuration, err := time.ParseDuration(timeout) + authWriter, err := auth.NewAuthWriter(auth.Config{ + APIKey: setup.apikey, + Username: setup.username, + Password: setup.password, + }) if err != nil { return cfg, err } - authWriter, err := auth.NewAuthWriter(auth.Config{ - APIKey: apiKey, - Username: username, - Password: password, - }) + timeoutDuration, err := time.ParseDuration(setup.timeout) if err != nil { return cfg, err } verboseCfg, err := verboseSettings( - verboseFile, - verbose, - !verboseCredentials, + setup.verboseFile, + setup.verbose, + !setup.verboseCredentials, ) if err != nil { return cfg, err @@ -96,8 +80,8 @@ func newAPIConfig(endpoint string, Client: &http.Client{}, VerboseSettings: verboseCfg, AuthWriter: authWriter, - Host: endpoint, - SkipTLSVerify: insecure, + Host: setup.endpoint, + SkipTLSVerify: setup.insecure, Timeout: timeoutDuration, UserAgent: userAgent(Version), Retries: DefaultHTTPRetries, diff --git a/ec/provider_config_test.go b/ec/provider_config_test.go index 5a0dffa94..c3b23c343 100644 --- a/ec/provider_config_test.go +++ b/ec/provider_config_test.go @@ -26,14 +26,11 @@ import ( "syscall" "testing" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/auth" "github.com/elastic/cloud-sdk-go/pkg/multierror" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_verboseSettings(t *testing.T) { @@ -102,61 +99,14 @@ func 
Test_verboseSettings(t *testing.T) { } func Test_newAPIConfig(t *testing.T) { - defer unsetECAPIKey(t)() - - defaultCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{}, - }) - invalidTimeoutCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{ - "timeout": "invalid", - }, - }) - - apiKeyCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{ - "apikey": "blih", - }, - }) - apiKeyObj := auth.APIKey("blih") + apiKeyObj := auth.APIKey("secret") - userPassCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{ - "username": "my-user", - "password": "my-pass", - }, - }) userPassObj := auth.UserLogin{ Username: "my-user", Password: "my-pass", Holder: new(auth.GenericHolder), } - insecureCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{ - "apikey": "blih", - "insecure": true, - }, - }) - - verboseCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{ - "apikey": "blih", - "verbose": true, - }, - }) defer func() { os.Remove("request.log") }() @@ -165,43 +115,18 @@ func Test_newAPIConfig(t *testing.T) { if err != nil { t.Fatal(err) } + defer func() { customFile.Close() os.Remove(customFile.Name()) }() - verboseCustomFileCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{ - "apikey": "blih", - "verbose": true, - "verbose_file": customFile.Name(), - }, - }) - verboseAndCredsCustomFileCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{ - "apikey": "blih", - "verbose": true, - "verbose_file": customFile.Name(), - 
"verbose_credentials": true, - }, - }) + invalidPath := filepath.Join("a", "b", "c", "d", "e", "f", "g", "h", "invalid!") - verboseInvalidFileCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{ - "apikey": "blih", - "verbose": true, - "verbose_file": invalidPath, - "verbose_credentials": true, - }, - }) + type args struct { - d *schema.ResourceData + apiSetup apiSetup } + tests := []struct { name string args args @@ -211,19 +136,36 @@ func Test_newAPIConfig(t *testing.T) { }{ { name: "default config returns with authwriter error", - args: args{d: defaultCfg}, + args: args{ + apiSetup: apiSetup{ + timeout: defaultTimeout.String(), + }, + }, err: multierror.NewPrefixed("authwriter", errors.New("one of apikey or username and password must be specified"), ), }, + { - name: "default config with invalid timeout returns with authwriter error", - args: args{d: invalidTimeoutCfg}, - err: errors.New(`time: invalid duration "invalid"`), + name: "default config with invalid timeout returns with authwriter error", + args: args{ + apiSetup: apiSetup{ + timeout: "invalid", + apikey: "secret", + }, + }, + err: errors.New(`time: invalid duration "invalid"`), }, + { name: "custom config with apikey auth succeeds", - args: args{d: apiKeyCfg}, + args: args{ + apiSetup: apiSetup{ + apikey: "secret", + timeout: defaultTimeout.String(), + endpoint: api.ESSEndpoint, + }, + }, want: api.Config{ UserAgent: fmt.Sprintf(providerUserAgentFmt, Version, api.DefaultUserAgent), ErrorDevice: os.Stdout, @@ -234,9 +176,17 @@ func Test_newAPIConfig(t *testing.T) { Retries: DefaultHTTPRetries, }, }, + { name: "custom config with username/password auth succeeds", - args: args{d: userPassCfg}, + args: args{ + apiSetup: apiSetup{ + username: "my-user", + password: "my-pass", + timeout: defaultTimeout.String(), + endpoint: api.ESSEndpoint, + }, + }, want: api.Config{ UserAgent: fmt.Sprintf(providerUserAgentFmt, Version, 
api.DefaultUserAgent), ErrorDevice: os.Stdout, @@ -247,23 +197,17 @@ func Test_newAPIConfig(t *testing.T) { Retries: DefaultHTTPRetries, }, }, + { name: "custom config with insecure succeeds", - args: args{d: insecureCfg}, - want: api.Config{ - UserAgent: fmt.Sprintf(providerUserAgentFmt, Version, api.DefaultUserAgent), - ErrorDevice: os.Stdout, - Host: api.ESSEndpoint, - AuthWriter: &apiKeyObj, - Client: &http.Client{}, - Timeout: defaultTimeout, - Retries: DefaultHTTPRetries, - SkipTLSVerify: true, + args: args{ + apiSetup: apiSetup{ + apikey: "secret", + insecure: true, + timeout: defaultTimeout.String(), + endpoint: api.ESSEndpoint, + }, }, - }, - { - name: "custom config with insecure succeeds", - args: args{d: insecureCfg}, want: api.Config{ UserAgent: fmt.Sprintf(providerUserAgentFmt, Version, api.DefaultUserAgent), ErrorDevice: os.Stdout, @@ -275,9 +219,18 @@ func Test_newAPIConfig(t *testing.T) { SkipTLSVerify: true, }, }, + { name: "custom config with verbose (default file) succeeds", - args: args{d: verboseCfg}, + args: args{ + apiSetup: apiSetup{ + apikey: "secret", + verbose: true, + verboseFile: "request.log", + timeout: defaultTimeout.String(), + endpoint: api.ESSEndpoint, + }, + }, want: api.Config{ UserAgent: fmt.Sprintf(providerUserAgentFmt, Version, api.DefaultUserAgent), ErrorDevice: os.Stdout, @@ -293,9 +246,18 @@ func Test_newAPIConfig(t *testing.T) { }, wantFileName: "request.log", }, + { name: "custom config with verbose (custom file) succeeds", - args: args{d: verboseCustomFileCfg}, + args: args{ + apiSetup: apiSetup{ + apikey: "secret", + verbose: true, + verboseFile: customFile.Name(), + timeout: defaultTimeout.String(), + endpoint: api.ESSEndpoint, + }, + }, want: api.Config{ UserAgent: fmt.Sprintf(providerUserAgentFmt, Version, api.DefaultUserAgent), ErrorDevice: os.Stdout, @@ -311,9 +273,19 @@ func Test_newAPIConfig(t *testing.T) { }, wantFileName: filepath.Base(customFile.Name()), }, + { name: "custom config with verbose and 
verbose_credentials (custom file) succeeds", - args: args{d: verboseAndCredsCustomFileCfg}, + args: args{ + apiSetup: apiSetup{ + apikey: "secret", + verbose: true, + verboseFile: customFile.Name(), + verboseCredentials: true, + timeout: defaultTimeout.String(), + endpoint: api.ESSEndpoint, + }, + }, want: api.Config{ UserAgent: fmt.Sprintf(providerUserAgentFmt, Version, api.DefaultUserAgent), ErrorDevice: os.Stdout, @@ -329,9 +301,18 @@ func Test_newAPIConfig(t *testing.T) { }, wantFileName: filepath.Base(customFile.Name()), }, + { name: "custom config with verbose and verbose_credentials (invalid file) fails ", - args: args{d: verboseInvalidFileCfg}, + args: args{ + apiSetup: apiSetup{ + apikey: "secret", + verbose: true, + verboseFile: invalidPath, + verboseCredentials: true, + timeout: defaultTimeout.String(), + }, + }, err: fmt.Errorf(`failed creating verbose file "%s": %w`, invalidPath, &os.PathError{ @@ -344,7 +325,7 @@ func Test_newAPIConfig(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := newAPIConfigLegacy(tt.args.d) + got, err := newAPIConfig(tt.args.apiSetup) assert.Equal(t, tt.err, err) if got.Verbose && err == nil { @@ -359,20 +340,3 @@ func Test_newAPIConfig(t *testing.T) { }) } } - -func unsetECAPIKey(t *testing.T) func() { - t.Helper() - // This is necessary to avoid any EC_API_KEY which might be set to cause - // test flakyness. 
- if k := os.Getenv("EC_API_KEY"); k != "" { - if err := os.Unsetenv("EC_API_KEY"); err != nil { - t.Fatal(err) - } - return func() { - if err := os.Setenv("EC_API_KEY", k); err != nil { - t.Fatal(err) - } - } - } - return func() {} -} From 51f1eaaf5dd55300bd80d6966cdd70394e8a37b1 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 13 Dec 2022 13:52:03 +0100 Subject: [PATCH 020/104] Improve default_value plan modifier --- ec/internal/planmodifier/default_value.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/ec/internal/planmodifier/default_value.go b/ec/internal/planmodifier/default_value.go index 5522279c4..815ded36a 100644 --- a/ec/internal/planmodifier/default_value.go +++ b/ec/internal/planmodifier/default_value.go @@ -46,10 +46,19 @@ func (m *defaultValueAttributePlanModifier) MarkdownDescription(ctx context.Cont return fmt.Sprintf("Sets the default value %q (%s) if the attribute is not set", m.DefaultValue, m.DefaultValue.Type(ctx)) } -func (m *defaultValueAttributePlanModifier) Modify(_ context.Context, req tfsdk.ModifyAttributePlanRequest, res *tfsdk.ModifyAttributePlanResponse) { +func (m *defaultValueAttributePlanModifier) Modify(_ context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { + if resp.AttributePlan == nil || req.AttributeConfig == nil { + return + } + if !req.AttributeConfig.IsNull() { return } - res.AttributePlan = m.DefaultValue + // if the config is the unknown value, use the unknown value otherwise, interpolation gets messed up + if req.AttributeConfig.IsUnknown() { + return + } + + resp.AttributePlan = m.DefaultValue } From d4499318853008f0d4c4040ef1947ae91e156f5b Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 13 Dec 2022 13:52:53 +0100 Subject: [PATCH 021/104] remove obsolete var --- .../enterprisesearch/v2/enterprise_search_topology.go | 1 - 1 file changed, 1 deletion(-) diff --git 
a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go index 44ffd3839..ebdbcb943 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go @@ -31,7 +31,6 @@ import ( ) const ( - minimumApmSize = 512 minimumEnterpriseSearchSize = 2048 ) From b51e74a99765aa53abaa20ed07a5883a55d004f6 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 13 Dec 2022 13:53:18 +0100 Subject: [PATCH 022/104] improve schemas --- .../deploymentresource/apm/v1/schema.go | 38 ++++--------------- .../deploymentresource/apm/v2/schema.go | 38 ++++--------------- .../deployment/v1/schema.go | 13 ------- .../deployment/v2/schema.go | 3 -- .../elasticsearch/v1/schema.go | 15 -------- .../elasticsearch/v2/schema.go | 8 +--- .../enterprisesearch/v1/schema.go | 3 -- .../enterprisesearch/v2/schema.go | 3 -- .../integrationsserver/v1/schema.go | 4 -- .../integrationsserver/v2/schema.go | 4 -- .../deploymentresource/kibana/v1/schema.go | 3 -- .../deploymentresource/kibana/v2/schema.go | 3 -- .../observability/v1/schema.go | 2 - .../observability/v2/schema.go | 2 - 14 files changed, 17 insertions(+), 122 deletions(-) diff --git a/ec/ecresource/deploymentresource/apm/v1/schema.go b/ec/ecresource/deploymentresource/apm/v1/schema.go index 18baf804a..b62b9102d 100644 --- a/ec/ecresource/deploymentresource/apm/v1/schema.go +++ b/ec/ecresource/deploymentresource/apm/v1/schema.go @@ -57,7 +57,6 @@ func ApmTopologySchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "memory"}), - resource.UseStateForUnknown(), }, }, "zone_count": { @@ -92,7 +91,6 @@ func ApmConfigSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ 
planmodifier.DefaultValue(types.Bool{Value: false}), - resource.UseStateForUnknown(), }, }, "user_settings_json": { @@ -131,8 +129,6 @@ func ApmSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), - // resource.UseStateForUnknown(), - // planmodifier.UseStateForNoChange(), }, }, "ref_id": { @@ -141,41 +137,23 @@ func ApmSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-apm"}), - // resource.UseStateForUnknown(), - // planmodifier.UseStateForNoChange(), }, }, "resource_id": { - Type: types.StringType, - Computed: true, - PlanModifiers: []tfsdk.AttributePlanModifier{ - // resource.UseStateForUnknown(), - // planmodifier.UseStateForNoChange(), - }, + Type: types.StringType, + Computed: true, }, "region": { - Type: types.StringType, - Computed: true, - PlanModifiers: []tfsdk.AttributePlanModifier{ - // resource.UseStateForUnknown(), - // planmodifier.UseStateForNoChange(), - }, + Type: types.StringType, + Computed: true, }, "http_endpoint": { - Type: types.StringType, - Computed: true, - PlanModifiers: []tfsdk.AttributePlanModifier{ - // resource.UseStateForUnknown(), - // planmodifier.UseStateForNoChange(), - }, + Type: types.StringType, + Computed: true, }, "https_endpoint": { - Type: types.StringType, - Computed: true, - PlanModifiers: []tfsdk.AttributePlanModifier{ - // resource.UseStateForUnknown(), - // planmodifier.UseStateForNoChange(), - }, + Type: types.StringType, + Computed: true, }, "topology": ApmTopologySchema(), "config": ApmConfigSchema(), diff --git a/ec/ecresource/deploymentresource/apm/v2/schema.go b/ec/ecresource/deploymentresource/apm/v2/schema.go index 5659352bf..3783d57ce 100644 --- a/ec/ecresource/deploymentresource/apm/v2/schema.go +++ b/ec/ecresource/deploymentresource/apm/v2/schema.go @@ -43,7 +43,6 @@ func ApmConfigSchema() tfsdk.Attribute { 
Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.Bool{Value: false}), - resource.UseStateForUnknown(), }, }, "user_settings_json": { @@ -81,8 +80,6 @@ func ApmSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), - // resource.UseStateForUnknown(), - // planmodifier.UseStateForNoChange(), }, }, "ref_id": { @@ -91,41 +88,23 @@ func ApmSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-apm"}), - // resource.UseStateForUnknown(), - // planmodifier.UseStateForNoChange(), }, }, "resource_id": { - Type: types.StringType, - Computed: true, - PlanModifiers: []tfsdk.AttributePlanModifier{ - // resource.UseStateForUnknown(), - // planmodifier.UseStateForNoChange(), - }, + Type: types.StringType, + Computed: true, }, "region": { - Type: types.StringType, - Computed: true, - PlanModifiers: []tfsdk.AttributePlanModifier{ - // resource.UseStateForUnknown(), - // planmodifier.UseStateForNoChange(), - }, + Type: types.StringType, + Computed: true, }, "http_endpoint": { - Type: types.StringType, - Computed: true, - PlanModifiers: []tfsdk.AttributePlanModifier{ - // resource.UseStateForUnknown(), - // planmodifier.UseStateForNoChange(), - }, + Type: types.StringType, + Computed: true, }, "https_endpoint": { - Type: types.StringType, - Computed: true, - PlanModifiers: []tfsdk.AttributePlanModifier{ - // resource.UseStateForUnknown(), - // planmodifier.UseStateForNoChange(), - }, + Type: types.StringType, + Computed: true, }, "instance_configuration_id": { Type: types.StringType, @@ -150,7 +129,6 @@ func ApmSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "memory"}), - resource.UseStateForUnknown(), }, }, "zone_count": { diff --git 
a/ec/ecresource/deploymentresource/deployment/v1/schema.go b/ec/ecresource/deploymentresource/deployment/v1/schema.go index 44357a4d2..97d692fac 100644 --- a/ec/ecresource/deploymentresource/deployment/v1/schema.go +++ b/ec/ecresource/deploymentresource/deployment/v1/schema.go @@ -41,9 +41,6 @@ func DeploymentSchema() tfsdk.Schema { Type: types.StringType, Computed: true, MarkdownDescription: "Unique identifier of this resource.", - // PlanModifiers: tfsdk.AttributePlanModifiers{ - // resource.UseStateForUnknown(), - // }, }, "alias": { Type: types.StringType, @@ -86,27 +83,17 @@ func DeploymentSchema() tfsdk.Schema { Type: types.StringType, Description: "Computed username obtained upon creating the Elasticsearch resource", Computed: true, - // PlanModifiers: tfsdk.AttributePlanModifiers{ - // resource.UseStateForUnknown(), - // }, }, "elasticsearch_password": { Type: types.StringType, Description: "Computed password obtained upon creating the Elasticsearch resource", Computed: true, Sensitive: true, - // PlanModifiers: tfsdk.AttributePlanModifiers{ - // resource.UseStateForUnknown(), - // }, }, "apm_secret_token": { Type: types.StringType, Computed: true, Sensitive: true, - // PlanModifiers: tfsdk.AttributePlanModifiers{ - // // resource.UseStateForUnknown(), - // planmodifier.UseStateForNoChange(), - // }, }, "traffic_filter": { Type: types.SetType{ diff --git a/ec/ecresource/deploymentresource/deployment/v2/schema.go b/ec/ecresource/deploymentresource/deployment/v2/schema.go index bed20c35d..6918f1f8c 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/schema.go +++ b/ec/ecresource/deploymentresource/deployment/v2/schema.go @@ -103,9 +103,6 @@ func DeploymentSchema() tfsdk.Schema { Type: types.StringType, Computed: true, Sensitive: true, - // PlanModifiers: tfsdk.AttributePlanModifiers{ - // ApmSecretTokenPlanModifier(), - // }, }, "traffic_filter": { Type: types.SetType{ diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go 
b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go index c3c5ddbf5..d6a6f3a70 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go @@ -71,24 +71,17 @@ func ElasticsearchSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), - resource.UseStateForUnknown(), }, }, "resource_id": { Type: types.StringType, Description: "The Elasticsearch resource unique identifier", Computed: true, - // PlanModifiers: tfsdk.AttributePlanModifiers{ - // resource.UseStateForUnknown(), - // }, }, "region": { Type: types.StringType, Description: "The Elasticsearch resource region", Computed: true, - // PlanModifiers: tfsdk.AttributePlanModifiers{ - // resource.UseStateForUnknown(), - // }, }, "cloud_id": { Type: types.StringType, @@ -105,17 +98,11 @@ func ElasticsearchSchema() tfsdk.Attribute { Type: types.StringType, Description: "The Elasticsearch resource HTTP endpoint", Computed: true, - // PlanModifiers: tfsdk.AttributePlanModifiers{ - // resource.UseStateForUnknown(), - // }, }, "https_endpoint": { Type: types.StringType, Description: "The Elasticsearch resource HTTPs endpoint", Computed: true, - // PlanModifiers: tfsdk.AttributePlanModifiers{ - // resource.UseStateForUnknown(), - // }, }, "topology": ElasticsearchTopologySchema(), @@ -322,7 +309,6 @@ func ElasticsearchRemoteClusterSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), - resource.UseStateForUnknown(), }, Optional: true, }, @@ -354,7 +340,6 @@ func ElasticsearchSnapshotSourceSchema() tfsdk.Attribute { Type: types.StringType, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "__latest_success__"}), - resource.UseStateForUnknown(), }, Optional: true, Computed: true, diff --git 
a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go index 251e8bed8..14388d0a7 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -65,7 +65,6 @@ func ElasticsearchSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), - resource.UseStateForUnknown(), }, }, "resource_id": { @@ -174,9 +173,7 @@ func ElasticsearchConfigSchema() tfsdk.Attribute { func ElasticsearchTopologyAutoscalingSchema(tierName string) tfsdk.Attribute { return tfsdk.Attribute{ Description: "Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources.", - // Optional: true, - // Computed: true, - Required: true, + Required: true, Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ "max_size_resource": { Description: "Maximum resource type for the maximum autoscaling setting.", @@ -251,7 +248,6 @@ func ElasticsearchRemoteClusterSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), - resource.UseStateForUnknown(), }, Optional: true, }, @@ -283,7 +279,6 @@ func ElasticsearchSnapshotSourceSchema() tfsdk.Attribute { Type: types.StringType, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "__latest_success__"}), - resource.UseStateForUnknown(), }, Optional: true, Computed: true, @@ -414,7 +409,6 @@ func ElasticsearchTierSchema(description string, required bool, tierName string) Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "memory"}), - UseTierStateForUnknown(tierName), }, }, "zone_count": { diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go 
b/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go index 4afd0d7e4..6a7e89573 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go @@ -37,7 +37,6 @@ func EnterpriseSearchSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), - resource.UseStateForUnknown(), }, }, "ref_id": { @@ -46,7 +45,6 @@ func EnterpriseSearchSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-enterprise_search"}), - resource.UseStateForUnknown(), }, }, "resource_id": { @@ -108,7 +106,6 @@ func EnterpriseSearchSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "memory"}), - resource.UseStateForUnknown(), }, }, "zone_count": { diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go index fad6817cb..ebbbe94ab 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go @@ -35,7 +35,6 @@ func EnterpriseSearchSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), - resource.UseStateForUnknown(), }, }, "ref_id": { @@ -44,7 +43,6 @@ func EnterpriseSearchSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-enterprise_search"}), - resource.UseStateForUnknown(), }, }, "resource_id": { @@ -98,7 +96,6 @@ func EnterpriseSearchSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "memory"}), - 
resource.UseStateForUnknown(), }, }, "zone_count": { diff --git a/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go b/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go index 66236e811..4fbc8117a 100644 --- a/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go +++ b/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go @@ -37,7 +37,6 @@ func IntegrationsServerSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), - resource.UseStateForUnknown(), }, }, "ref_id": { @@ -46,7 +45,6 @@ func IntegrationsServerSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-integrations_server"}), - resource.UseStateForUnknown(), }, }, "resource_id": { @@ -108,7 +106,6 @@ func IntegrationsServerSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "memory"}), - resource.UseStateForUnknown(), }, }, "zone_count": { @@ -141,7 +138,6 @@ func IntegrationsServerSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.Bool{Value: false}), - resource.UseStateForUnknown(), }, }, "user_settings_json": { diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go b/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go index 07a9a2df4..9f12453ea 100644 --- a/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go @@ -35,7 +35,6 @@ func IntegrationsServerSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), - resource.UseStateForUnknown(), }, }, "ref_id": { @@ -44,7 +43,6 @@ func 
IntegrationsServerSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-integrations_server"}), - resource.UseStateForUnknown(), }, }, "resource_id": { @@ -98,7 +96,6 @@ func IntegrationsServerSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "memory"}), - resource.UseStateForUnknown(), }, }, "zone_count": { @@ -128,7 +125,6 @@ func IntegrationsServerSchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.Bool{Value: false}), - resource.UseStateForUnknown(), }, }, "user_settings_json": { diff --git a/ec/ecresource/deploymentresource/kibana/v1/schema.go b/ec/ecresource/deploymentresource/kibana/v1/schema.go index 65b5c8d62..f95742771 100644 --- a/ec/ecresource/deploymentresource/kibana/v1/schema.go +++ b/ec/ecresource/deploymentresource/kibana/v1/schema.go @@ -35,7 +35,6 @@ func KibanaSchema() tfsdk.Attribute { Type: types.StringType, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), - resource.UseStateForUnknown(), }, Computed: true, Optional: true, @@ -44,7 +43,6 @@ func KibanaSchema() tfsdk.Attribute { Type: types.StringType, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-kibana"}), - resource.UseStateForUnknown(), }, Computed: true, Optional: true, @@ -106,7 +104,6 @@ func KibanaSchema() tfsdk.Attribute { Description: `Optional size type, defaults to "memory".`, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "memory"}), - resource.UseStateForUnknown(), }, Computed: true, Optional: true, diff --git a/ec/ecresource/deploymentresource/kibana/v2/schema.go b/ec/ecresource/deploymentresource/kibana/v2/schema.go index d15a75119..3740df7ef 100644 --- 
a/ec/ecresource/deploymentresource/kibana/v2/schema.go +++ b/ec/ecresource/deploymentresource/kibana/v2/schema.go @@ -33,7 +33,6 @@ func KibanaSchema() tfsdk.Attribute { Type: types.StringType, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), - resource.UseStateForUnknown(), }, Computed: true, Optional: true, @@ -42,7 +41,6 @@ func KibanaSchema() tfsdk.Attribute { Type: types.StringType, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "main-kibana"}), - resource.UseStateForUnknown(), }, Computed: true, Optional: true, @@ -96,7 +94,6 @@ func KibanaSchema() tfsdk.Attribute { Description: `Optional size type, defaults to "memory".`, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.String{Value: "memory"}), - resource.UseStateForUnknown(), }, Computed: true, Optional: true, diff --git a/ec/ecresource/deploymentresource/observability/v1/schema.go b/ec/ecresource/deploymentresource/observability/v1/schema.go index 20e283ddf..9196858fa 100644 --- a/ec/ecresource/deploymentresource/observability/v1/schema.go +++ b/ec/ecresource/deploymentresource/observability/v1/schema.go @@ -49,7 +49,6 @@ func ObservabilitySchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.Bool{Value: true}), - resource.UseStateForUnknown(), }, }, "metrics": { @@ -58,7 +57,6 @@ func ObservabilitySchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.Bool{Value: true}), - resource.UseStateForUnknown(), }, }, }), diff --git a/ec/ecresource/deploymentresource/observability/v2/schema.go b/ec/ecresource/deploymentresource/observability/v2/schema.go index 21dcd4b42..634e1ee5f 100644 --- a/ec/ecresource/deploymentresource/observability/v2/schema.go +++ b/ec/ecresource/deploymentresource/observability/v2/schema.go @@ -47,7 +47,6 @@ func 
ObservabilitySchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.Bool{Value: true}), - resource.UseStateForUnknown(), }, }, "metrics": { @@ -56,7 +55,6 @@ func ObservabilitySchema() tfsdk.Attribute { Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ planmodifier.DefaultValue(types.Bool{Value: true}), - resource.UseStateForUnknown(), }, }, }), From 34daf0feab9ec58f901fdec748f7bdaeab795e68 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 14 Dec 2022 14:11:25 +0100 Subject: [PATCH 023/104] remove obsolete code --- ec/internal/util/changes.go | 28 ----------- ec/internal/util/changes_test.go | 86 -------------------------------- ec/internal/util/helpers.go | 20 -------- 3 files changed, 134 deletions(-) delete mode 100644 ec/internal/util/changes.go delete mode 100644 ec/internal/util/changes_test.go diff --git a/ec/internal/util/changes.go b/ec/internal/util/changes.go deleted file mode 100644 index 431dabaca..000000000 --- a/ec/internal/util/changes.go +++ /dev/null @@ -1,28 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package util - -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - -// ObjectRemoved takes in a ResourceData and a key string, returning whether -// or not the object ([]intreface{} type) is being removed in the current -// change. -func ObjectRemoved(d *schema.ResourceData, key string) bool { - old, new := d.GetChange(key) - return len(old.([]interface{})) > 0 && len(new.([]interface{})) == 0 -} diff --git a/ec/internal/util/changes_test.go b/ec/internal/util/changes_test.go deleted file mode 100644 index b78c35c9d..000000000 --- a/ec/internal/util/changes_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package util - -import ( - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func TestObjectRemoved(t *testing.T) { - schemaMap := map[string]*schema.Schema{ - "object": { - Type: schema.TypeList, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - } - type args struct { - d *schema.ResourceData - key string - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "removes an object", - args: args{ - key: "object", - d: NewResourceData(t, ResDataParams{ - ID: "id", - Schema: schemaMap, - State: map[string]interface{}{ - "object": []interface{}{"a", "b"}, - }, - Change: map[string]interface{}{ - "object": []interface{}{}, - }, - }), - }, - want: true, - }, - { - name: "does not remove an object", - args: args{ - key: "object", - d: NewResourceData(t, ResDataParams{ - ID: "id", - Schema: schemaMap, - State: map[string]interface{}{ - "object": []interface{}{"a", "b"}, - }, - Change: map[string]interface{}{ - "object": []interface{}{"b"}, - }, - }), - }, - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := ObjectRemoved(tt.args.d, tt.args.key); got != tt.want { - t.Errorf("ObjectRemoved() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/ec/internal/util/helpers.go b/ec/internal/util/helpers.go index 8f9c72fc9..6418b23fc 100644 --- a/ec/internal/util/helpers.go +++ b/ec/internal/util/helpers.go @@ -24,7 +24,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/elastic/cloud-sdk-go/pkg/models" ) @@ -92,25 +91,6 @@ func MultiGetenvOrDefault(keys []string, defaultValue string) string { return defaultValue } -func GetStringFromSchemaOrEnv(d *schema.ResourceData, key string, envKeys []string, defaultValue string) string { - if value, ok := d.GetOk(key); ok { - return value.(string) - } - return 
MultiGetenvOrDefault(envKeys, defaultValue) -} -func GetBoolFromSchemaOrEnv(d *schema.ResourceData, key string, envKeys []string) bool { - if value, ok := d.GetOk(key); ok { - return value.(bool) - } - - strValue := MultiGetenvOrDefault(envKeys, "false") - value, err := StringToBool(strValue) - if err != nil { - return false - } - return value -} - func StringToBool(str string) (bool, error) { if str == "" { return false, nil From dd0508b41a244cf3fe2866a7dc197fc0b79b8bd7 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 14 Dec 2022 14:44:58 +0100 Subject: [PATCH 024/104] remove incorrect comment --- ec/ecresource/deploymentresource/read.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/ec/ecresource/deploymentresource/read.go b/ec/ecresource/deploymentresource/read.go index d27388c03..dbaca555a 100644 --- a/ec/ecresource/deploymentresource/read.go +++ b/ec/ecresource/deploymentresource/read.go @@ -178,8 +178,6 @@ func deploymentNotFound(err error) bool { return apierror.IsRuntimeStatusCode(err, 403) } -// Setting this variable here so that it is parsed at compile time in case -// any errors are thrown, they are at compile time not when the user runs it. 
var minimumSupportedVersion = semver.MustParse("6.6.0") func checkVersion(version string) error { From 947e8b2ab5849d57316fb6ce5185d4c7fb789dc8 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 14 Dec 2022 14:55:49 +0100 Subject: [PATCH 025/104] remove obsolete var --- ec/ecresource/deploymentresource/read.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ec/ecresource/deploymentresource/read.go b/ec/ecresource/deploymentresource/read.go index dbaca555a..0b98bb8f8 100644 --- a/ec/ecresource/deploymentresource/read.go +++ b/ec/ecresource/deploymentresource/read.go @@ -45,6 +45,7 @@ func (r *Resource) Read(ctx context.Context, request resource.ReadRequest, respo var curState deploymentv2.DeploymentTF diags := request.State.Get(ctx, &curState) + response.Diagnostics.Append(diags...) if response.Diagnostics.HasError() { @@ -52,12 +53,11 @@ func (r *Resource) Read(ctx context.Context, request resource.ReadRequest, respo } var newState *deploymentv2.DeploymentTF - var err error // use state for the plan (there is no plan and config during Read) - otherwise we can get unempty plan output - if newState, diags = r.read(ctx, curState.Id.Value, &curState, curState, nil); err != nil { - response.Diagnostics.Append(diags...) - } + newState, diags = r.read(ctx, curState.Id.Value, &curState, curState, nil) + + response.Diagnostics.Append(diags...) 
if newState == nil { response.State.RemoveResource(ctx) From dfd606078cd714101cda557fe1dc881df3b46dde Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 14 Dec 2022 15:19:14 +0100 Subject: [PATCH 026/104] address PR comments --- .../enterprisesearch/v2/enterprise_search.go | 10 +++++----- .../enterprisesearch/v2/enterprise_search_config.go | 4 ++-- .../enterprisesearch/v2/enterprise_search_topology.go | 10 +++++----- ec/ecresource/deploymentresource/read.go | 7 ++++++- 4 files changed, 18 insertions(+), 13 deletions(-) diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search.go index ad846c3c6..3470d3509 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search.go @@ -101,7 +101,7 @@ func ReadEnterpriseSearch(in *models.EnterpriseSearchResourceInfo) (*EnterpriseS ess.HttpEndpoint, ess.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) - cfg, err := ReadEnterpriseSearchConfig(plan.EnterpriseSearch) + cfg, err := readEnterpriseSearchConfig(plan.EnterpriseSearch) if err != nil { return nil, err } @@ -133,7 +133,7 @@ func (es *EnterpriseSearchTF) Payload(ctx context.Context, payload models.Enterp diags.Append(ds...) if !ds.HasError() && config != nil { - diags.Append(EnterpriseSearchConfigPayload(ctx, *config, payload.Plan.EnterpriseSearch)...) + diags.Append(enterpriseSearchConfigPayload(ctx, *config, payload.Plan.EnterpriseSearch)...) 
} } @@ -188,7 +188,7 @@ func EnterpriseSearchesPayload(ctx context.Context, esObj types.Object, template return nil, nil } - templatePayload := EssResource(template) + templatePayload := payloadFromTemplate(template) if templatePayload == nil { diags.AddError( @@ -207,9 +207,9 @@ func EnterpriseSearchesPayload(ctx context.Context, esObj types.Object, template return payload, nil } -// EssResource returns the EnterpriseSearchPayload from a deployment +// payloadFromTemplate returns the EnterpriseSearchPayload from a deployment // template or an empty version of the payload. -func EssResource(template *models.DeploymentTemplateInfoV2) *models.EnterpriseSearchPayload { +func payloadFromTemplate(template *models.DeploymentTemplateInfoV2) *models.EnterpriseSearchPayload { if template == nil || len(template.DeploymentTemplate.Resources.EnterpriseSearch) == 0 { return nil } diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_config.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_config.go index 2d3f64218..5bfaacd32 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_config.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_config.go @@ -30,7 +30,7 @@ import ( type EnterpriseSearchConfig v1.EnterpriseSearchConfig -func ReadEnterpriseSearchConfig(in *models.EnterpriseSearchConfiguration) (*EnterpriseSearchConfig, error) { +func readEnterpriseSearchConfig(in *models.EnterpriseSearchConfiguration) (*EnterpriseSearchConfig, error) { var cfg EnterpriseSearchConfig if in == nil { @@ -68,7 +68,7 @@ func ReadEnterpriseSearchConfig(in *models.EnterpriseSearchConfiguration) (*Ente return &cfg, nil } -func EnterpriseSearchConfigPayload(ctx context.Context, cfg v1.EnterpriseSearchConfigTF, res *models.EnterpriseSearchConfiguration) diag.Diagnostics { +func enterpriseSearchConfigPayload(ctx context.Context, cfg v1.EnterpriseSearchConfigTF, res 
*models.EnterpriseSearchConfiguration) diag.Diagnostics { var diags diag.Diagnostics if cfg.UserSettingsJson.Value != "" { diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go index ebdbcb943..1546ce7c8 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go @@ -34,9 +34,9 @@ const ( minimumEnterpriseSearchSize = 2048 ) -type EnterpriseSearchTopologies v1.EnterpriseSearchTopologies +type enterpriseSearchTopologies v1.EnterpriseSearchTopologies -func ReadEnterpriseSearchTopology(in *models.EnterpriseSearchTopologyElement) (*v1.EnterpriseSearchTopology, error) { +func readEnterpriseSearchTopology(in *models.EnterpriseSearchTopologyElement) (*v1.EnterpriseSearchTopology, error) { var topology v1.EnterpriseSearchTopology topology.InstanceConfigurationId = ec.String(in.InstanceConfigurationID) @@ -65,18 +65,18 @@ func ReadEnterpriseSearchTopology(in *models.EnterpriseSearchTopologyElement) (* return &topology, nil } -func ReadEnterpriseSearchTopologies(in []*models.EnterpriseSearchTopologyElement) (EnterpriseSearchTopologies, error) { +func ReadEnterpriseSearchTopologies(in []*models.EnterpriseSearchTopologyElement) (enterpriseSearchTopologies, error) { if len(in) == 0 { return nil, nil } - topologies := make(EnterpriseSearchTopologies, 0, len(in)) + topologies := make(enterpriseSearchTopologies, 0, len(in)) for _, model := range in { if model.Size == nil || model.Size.Value == nil || *model.Size.Value == 0 { continue } - topology, err := ReadEnterpriseSearchTopology(model) + topology, err := readEnterpriseSearchTopology(model) if err != nil { return nil, err } diff --git a/ec/ecresource/deploymentresource/read.go b/ec/ecresource/deploymentresource/read.go index 0b98bb8f8..bf012ae47 100644 --- 
a/ec/ecresource/deploymentresource/read.go +++ b/ec/ecresource/deploymentresource/read.go @@ -147,7 +147,12 @@ func (r *Resource) read(ctx context.Context, id string, state *deploymentv2.Depl // ReadDeployment returns empty config struct if there is no config, so we have to nullify it if plan doesn't contain it // we use state for plan in Read and there is no state during import so we need to check elasticsearchPlan against nil - if elasticsearchPlan != nil && elasticsearchPlan.Config.IsNull() && deployment.Elasticsearch != nil && deployment.Elasticsearch.Config != nil && deployment.Elasticsearch.Config.IsEmpty() { + if elasticsearchPlan != nil && + elasticsearchPlan.Config.IsNull() && + deployment.Elasticsearch != nil && + deployment.Elasticsearch.Config != nil && + deployment.Elasticsearch.Config.IsEmpty() { + deployment.Elasticsearch.Config = nil } From 7f676f555832980a27398d1b9741a4835a26b551 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Mon, 19 Dec 2022 19:04:31 +0100 Subject: [PATCH 027/104] Remove obsolete function --- .../converters/parse_topology_size_test.go | 97 +++++++++++++++++++ ec/internal/util/parsers.go | 28 ------ ec/internal/util/parsers_test.go | 72 -------------- 3 files changed, 97 insertions(+), 100 deletions(-) create mode 100644 ec/internal/converters/parse_topology_size_test.go diff --git a/ec/internal/converters/parse_topology_size_test.go b/ec/internal/converters/parse_topology_size_test.go new file mode 100644 index 000000000..6ebd95e93 --- /dev/null +++ b/ec/internal/converters/parse_topology_size_test.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package converters + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" +) + +func TestParseTopologySize(t *testing.T) { + type args struct { + size *string + resource *string + } + tests := []struct { + name string + args args + want *models.TopologySize + err error + }{ + { + name: "has no size returns nil", + }, + { + name: "has empty size returns nil", + args: args{ + size: ec.String(""), + }, + }, + { + name: "has badly formatted size returns error", + args: args{ + size: ec.String("asdasd"), + }, + err: errors.New(`failed to convert "asdasd" to `), + }, + { + name: "has size but no size_resource", + args: args{ + size: ec.String("15g"), + }, + want: &models.TopologySize{ + Value: ec.Int32(15360), + Resource: ec.String("memory"), + }, + }, + { + name: "has size and explicit size_resource (memory)", + args: args{ + size: ec.String("8g"), + resource: ec.String("memory"), + }, + want: &models.TopologySize{ + Value: ec.Int32(8192), + Resource: ec.String("memory"), + }, + }, + { + name: "has size and explicit size_resource (storage)", + args: args{ + size: ec.String("4g"), + resource: ec.String("storage"), + }, + want: &models.TopologySize{ + Value: ec.Int32(4096), + Resource: ec.String("storage"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseTopologySize(tt.args.size, tt.args.resource) + assert.Equal(t, tt.err, err) + assert.Equal(t, tt.want, got) + }) + } +} diff 
--git a/ec/internal/util/parsers.go b/ec/internal/util/parsers.go index 4c512c335..dd1b174c1 100644 --- a/ec/internal/util/parsers.go +++ b/ec/internal/util/parsers.go @@ -19,14 +19,8 @@ package util import ( "fmt" - - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deploymentsize" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) -const defaultSizeResource = "memory" - // MemoryToState parses a megabyte int notation to a gigabyte notation. func MemoryToState(mem int32) string { if mem%1024 > 1 && mem%512 == 0 { @@ -34,25 +28,3 @@ func MemoryToState(mem int32) string { } return fmt.Sprintf("%dg", mem/1024) } - -// ParseTopologySize parses a flattened topology into its model. -func ParseTopologySize(topology map[string]interface{}) (*models.TopologySize, error) { - if mem, ok := topology["size"].(string); ok && mem != "" { - val, err := deploymentsize.ParseGb(mem) - if err != nil { - return nil, err - } - - var sizeResource = defaultSizeResource - if sr, ok := topology["size_resource"].(string); ok { - sizeResource = sr - } - - return &models.TopologySize{ - Value: ec.Int32(val), - Resource: ec.String(sizeResource), - }, nil - } - - return nil, nil -} diff --git a/ec/internal/util/parsers_test.go b/ec/internal/util/parsers_test.go index 2f8f3c1a5..958490b9c 100644 --- a/ec/internal/util/parsers_test.go +++ b/ec/internal/util/parsers_test.go @@ -18,13 +18,9 @@ package util import ( - "errors" "testing" "github.com/stretchr/testify/assert" - - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) func TestMemoryToState(t *testing.T) { @@ -54,71 +50,3 @@ func TestMemoryToState(t *testing.T) { }) } } - -func TestParseTopologySize(t *testing.T) { - type args struct { - topology map[string]interface{} - } - tests := []struct { - name string - args args - want *models.TopologySize - err error - }{ - { - name: "has no size returns nil", - }, - { - name: "has empty size returns 
nil", - args: args{topology: map[string]interface{}{ - "size": "", - }}, - }, - { - name: "has badly formatted size returns error", - args: args{topology: map[string]interface{}{ - "size": "asdasd", - }}, - err: errors.New(`failed to convert "asdasd" to `), - }, - { - name: "has size but no size_resource", - args: args{topology: map[string]interface{}{ - "size": "15g", - }}, - want: &models.TopologySize{ - Value: ec.Int32(15360), - Resource: ec.String("memory"), - }, - }, - { - name: "has size and explicit size_resource (memory)", - args: args{topology: map[string]interface{}{ - "size": "8g", - "size_resource": "memory", - }}, - want: &models.TopologySize{ - Value: ec.Int32(8192), - Resource: ec.String("memory"), - }, - }, - { - name: "has size and explicit size_resource (storage)", - args: args{topology: map[string]interface{}{ - "size": "4g", - "size_resource": "storage", - }}, - want: &models.TopologySize{ - Value: ec.Int32(4096), - Resource: ec.String("storage"), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := ParseTopologySize(tt.args.topology) - assert.Equal(t, tt.err, err) - assert.Equal(t, tt.want, got) - }) - } -} From 345a3413475ceac10538dcbf82fb0f52fd327397 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 20 Dec 2022 11:03:33 +0100 Subject: [PATCH 028/104] Move provider data to an anonymous struct --- ec/provider.go | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/ec/provider.go b/ec/provider.go index 34a75bc3a..2641826f3 100644 --- a/ec/provider.go +++ b/ec/provider.go @@ -161,18 +161,6 @@ func (p *Provider) GetSchema(context.Context) (tfsdk.Schema, diag.Diagnostics) { }, diags } -type providerData struct { - Endpoint types.String `tfsdk:"endpoint"` - ApiKey types.String `tfsdk:"apikey"` - Username types.String `tfsdk:"username"` - Password types.String `tfsdk:"password"` - Insecure types.Bool `tfsdk:"insecure"` - Timeout types.String 
`tfsdk:"timeout"` - Verbose types.Bool `tfsdk:"verbose"` - VerboseCredentials types.Bool `tfsdk:"verbose_credentials"` - VerboseFile types.String `tfsdk:"verbose_file"` -} - func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, res *provider.ConfigureResponse) { if p.client != nil { // Required for unit tests, because a mock client is pre-created there. @@ -182,7 +170,18 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, } // Retrieve provider data from configuration - var config providerData + var config struct { + Endpoint types.String `tfsdk:"endpoint"` + ApiKey types.String `tfsdk:"apikey"` + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` + Insecure types.Bool `tfsdk:"insecure"` + Timeout types.String `tfsdk:"timeout"` + Verbose types.Bool `tfsdk:"verbose"` + VerboseCredentials types.Bool `tfsdk:"verbose_credentials"` + VerboseFile types.String `tfsdk:"verbose_file"` + } + diags := req.Config.Get(ctx, &config) res.Diagnostics.Append(diags...) 
if res.Diagnostics.HasError() { From bd0596738c0d060f9d8c401c0cb1f2952236ed6d Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 20 Dec 2022 20:05:06 +0100 Subject: [PATCH 029/104] unit test for provider Config --- ec/internal/util/helpers.go | 5 +- ec/provider.go | 69 +++++++++++------- ec/provider_test.go | 141 ++++++++++++++++++++++++++++++++++++ 3 files changed, 188 insertions(+), 27 deletions(-) create mode 100644 ec/provider_test.go diff --git a/ec/internal/util/helpers.go b/ec/internal/util/helpers.go index 6418b23fc..826fb40dd 100644 --- a/ec/internal/util/helpers.go +++ b/ec/internal/util/helpers.go @@ -28,6 +28,9 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" ) +// used in tests +var GetEnv = os.Getenv + // FlattenClusterEndpoint receives a ClusterMetadataInfo, parses the http and // https endpoints and returns a map with two keys: `http_endpoint` and // `https_endpoint` @@ -84,7 +87,7 @@ func IsCurrentEssPlanEmpty(res *models.EnterpriseSearchResourceInfo) bool { // variables have a value, the default value is returned. 
func MultiGetenvOrDefault(keys []string, defaultValue string) string { for _, key := range keys { - if value := os.Getenv(key); value != "" { + if value := GetEnv(key); value != "" { return value } } diff --git a/ec/provider.go b/ec/provider.go index 2641826f3..06f66f279 100644 --- a/ec/provider.go +++ b/ec/provider.go @@ -24,6 +24,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/tfsdk" @@ -161,37 +162,53 @@ func (p *Provider) GetSchema(context.Context) (tfsdk.Schema, diag.Diagnostics) { }, diags } -func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, res *provider.ConfigureResponse) { +// Retrieve provider data from configuration +type providerConfig struct { + Endpoint types.String `tfsdk:"endpoint"` + ApiKey types.String `tfsdk:"apikey"` + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` + Insecure types.Bool `tfsdk:"insecure"` + Timeout types.String `tfsdk:"timeout"` + Verbose types.Bool `tfsdk:"verbose"` + VerboseCredentials types.Bool `tfsdk:"verbose_credentials"` + VerboseFile types.String `tfsdk:"verbose_file"` +} + +func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { if p.client != nil { // Required for unit tests, because a mock client is pre-created there. 
- res.DataSourceData = p.client - res.ResourceData = p.client + resp.DataSourceData = p.client + resp.ResourceData = p.client return } // Retrieve provider data from configuration - var config struct { - Endpoint types.String `tfsdk:"endpoint"` - ApiKey types.String `tfsdk:"apikey"` - Username types.String `tfsdk:"username"` - Password types.String `tfsdk:"password"` - Insecure types.Bool `tfsdk:"insecure"` - Timeout types.String `tfsdk:"timeout"` - Verbose types.Bool `tfsdk:"verbose"` - VerboseCredentials types.Bool `tfsdk:"verbose_credentials"` - VerboseFile types.String `tfsdk:"verbose_file"` - } + var config providerConfig diags := req.Config.Get(ctx, &config) - res.Diagnostics.Append(diags...) - if res.Diagnostics.HasError() { + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { return } var endpoint string if config.Endpoint.Null { endpoint = util.MultiGetenvOrDefault([]string{"EC_ENDPOINT", "EC_HOST"}, api.ESSEndpoint) - // TODO We need to validate the endpoint here, similar to how it is done if the value is passed via terraform (isURLWithSchemeValidator) + + validateReq := tfsdk.ValidateAttributeRequest{ + AttributePath: path.Root("endpoint"), + AttributeConfig: types.String{Value: endpoint}, + } + + validateResp := tfsdk.ValidateAttributeResponse{} + + validators.IsURLWithSchemeValidator(validURLSchemes).Validate(ctx, validateReq, &validateResp) + + if validateResp.Diagnostics.HasError() { + resp.Diagnostics.Append(validateResp.Diagnostics...) 
+ return + } } else { endpoint = config.Endpoint.Value } @@ -222,7 +239,7 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, if config.Insecure.Null { insecureStr := util.MultiGetenvOrDefault([]string{"EC_INSECURE", "EC_SKIP_TLS_VALIDATION"}, "") if insecure, err = util.StringToBool(insecureStr); err != nil { - res.Diagnostics.AddWarning( + resp.Diagnostics.AddError( "Unable to create client", fmt.Sprintf("Invalid value %v for insecure", insecureStr), ) @@ -243,7 +260,7 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, if config.Verbose.Null { verboseStr := util.MultiGetenvOrDefault([]string{"EC_VERBOSE"}, "") if verbose, err = util.StringToBool(verboseStr); err != nil { - res.Diagnostics.AddWarning( + resp.Diagnostics.AddError( "Unable to create client", fmt.Sprintf("Invalid value %v for verbose", verboseStr), ) @@ -257,7 +274,7 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, if config.VerboseCredentials.Null { verboseCredentialsStr := util.MultiGetenvOrDefault([]string{"EC_VERBOSE_CREDENTIALS"}, "") if verboseCredentials, err = util.StringToBool(verboseCredentialsStr); err != nil { - res.Diagnostics.AddWarning( + resp.Diagnostics.AddError( "Unable to create client", fmt.Sprintf("Invalid value %v for verboseCredentials", verboseCredentialsStr), ) @@ -287,23 +304,23 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, }) if err != nil { - res.Diagnostics.AddWarning( + resp.Diagnostics.AddError( "Unable to create api Client config", - fmt.Sprintf("Unexpected error: %+v", err), + err.Error(), ) return } client, err := api.NewAPI(cfg) if err != nil { - res.Diagnostics.AddWarning( + resp.Diagnostics.AddError( "Unable to create api Client config", - fmt.Sprintf("Unexpected error: %+v", err), + err.Error(), ) return } p.client = client - res.DataSourceData = client - res.ResourceData = client + resp.DataSourceData = client + 
resp.ResourceData = client } diff --git a/ec/provider_test.go b/ec/provider_test.go new file mode 100644 index 000000000..700ac8262 --- /dev/null +++ b/ec/provider_test.go @@ -0,0 +1,141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package ec + +import ( + "context" + "testing" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" +) + +func Test_Configure(t *testing.T) { + type args struct { + env map[string]string + config providerConfig + } + + tests := []struct { + name string + args args + diags diag.Diagnostics + }{ + { + name: `provider config doesn't define "endpoint" and "EC_ENDPOINT" is defined and invalid`, + args: args{ + env: map[string]string{ + "EC_ENDPOINT": "invalid", + }, + config: providerConfig{ + Endpoint: types.String{Null: true}, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddAttributeError(path.Root("endpoint"), "Value 
must be a valid URL with scheme (http, https)", "URL is missing host, got invalid") + return diags + }(), + }, + { + name: `provider config and env vars don't define either api key or user login/passwords`, + args: args{ + env: map[string]string{ + "EC_ENDPOINT": "https://cloud.elastic.co/api", + }, + config: providerConfig{ + Endpoint: types.String{Null: true}, + Username: types.String{Null: true}, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("Unable to create api Client config", "authwriter: 1 error occurred:\n\t* one of apikey or username and password must be specified\n\n") + return diags + }(), + }, + { + name: `provider config is read from environment variables`, + args: args{ + env: map[string]string{ + "EC_ENDPOINT": "https://cloud.elastic.co/api", + "EC_API_KEY": "secret", + "EC_INSECURE": "true", + "EC_TIMEOUT": "1m", + "EC_VERBOSE": "true", + "EC_VERBOSE_CREDENTIALS": "true", + "EC_VERBOSE_FILE": "requests.log", + }, + config: providerConfig{ + Endpoint: types.String{Null: true}, + ApiKey: types.String{Null: true}, + Insecure: types.Bool{Null: true}, + Timeout: types.String{Null: true}, + Verbose: types.Bool{Null: true}, + VerboseCredentials: types.Bool{Null: true}, + VerboseFile: types.String{Null: true}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var p Provider + + schema, diags := p.GetSchema(context.Background()) + + assert.Nil(t, diags) + + resp := provider.ConfigureResponse{} + + util.GetEnv = func(key string) string { + return tt.args.env[key] + } + + var config types.Object + + diags = tfsdk.ValueFrom(context.Background(), &tt.args.config, schema.Type(), &config) + + assert.Nil(t, diags) + + rawConfig, err := config.ToTerraformValue(context.Background()) + + assert.Nil(t, err) + + p.Configure( + context.Background(), + provider.ConfigureRequest{ + Config: tfsdk.Config{Schema: schema, Raw: rawConfig}, + }, + &resp, + ) + + if tt.diags != nil { + 
assert.Equal(t, tt.diags, resp.Diagnostics) + } else { + assert.Nil(t, resp.Diagnostics) + } + }) + } +} From f90f2c8cc05bc69238004d641194dba954e88718 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 21 Dec 2022 15:22:32 +0100 Subject: [PATCH 030/104] improve provider tests + address PR comments --- ec/provider.go | 90 ++++++++++++++++++++++++--------------------- ec/provider_test.go | 59 +++++++++++++++++++++++++++++ 2 files changed, 108 insertions(+), 41 deletions(-) diff --git a/ec/provider.go b/ec/provider.go index 06f66f279..474516ad4 100644 --- a/ec/provider.go +++ b/ec/provider.go @@ -183,112 +183,106 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, return } - // Retrieve provider data from configuration var config providerConfig diags := req.Config.Get(ctx, &config) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { return } - var endpoint string + endpoint := config.Endpoint.Value + if config.Endpoint.Null { endpoint = util.MultiGetenvOrDefault([]string{"EC_ENDPOINT", "EC_HOST"}, api.ESSEndpoint) - validateReq := tfsdk.ValidateAttributeRequest{ - AttributePath: path.Root("endpoint"), - AttributeConfig: types.String{Value: endpoint}, - } + diags := validateEndpoint(ctx, endpoint) - validateResp := tfsdk.ValidateAttributeResponse{} + resp.Diagnostics.Append(diags...) - validators.IsURLWithSchemeValidator(validURLSchemes).Validate(ctx, validateReq, &validateResp) - - if validateResp.Diagnostics.HasError() { - resp.Diagnostics.Append(validateResp.Diagnostics...) 
+ if diags.HasError() { return } - } else { - endpoint = config.Endpoint.Value } - var apiKey string + apiKey := config.ApiKey.Value + if config.ApiKey.Null { apiKey = util.MultiGetenvOrDefault([]string{"EC_API_KEY"}, "") - } else { - apiKey = config.ApiKey.Value } - var username string + username := config.Username.Value + if config.Username.Null { username = util.MultiGetenvOrDefault([]string{"EC_USER", "EC_USERNAME"}, "") - } else { - username = config.Username.Value } - var password string + password := config.Password.Value + if config.Password.Null { password = util.MultiGetenvOrDefault([]string{"EC_PASS", "EC_PASSWORD"}, "") - } else { - password = config.Password.Value } - var err error - var insecure bool + insecure := config.Insecure.Value + if config.Insecure.Null { insecureStr := util.MultiGetenvOrDefault([]string{"EC_INSECURE", "EC_SKIP_TLS_VALIDATION"}, "") + + var err error + if insecure, err = util.StringToBool(insecureStr); err != nil { resp.Diagnostics.AddError( "Unable to create client", - fmt.Sprintf("Invalid value %v for insecure", insecureStr), + fmt.Sprintf("Invalid value '%v' in 'EC_INSECURE' or 'EC_SKIP_TLS_VALIDATION'", insecureStr), ) return } - } else { - insecure = config.Insecure.Value } - var timeout string + timeout := config.Timeout.Value + if config.Timeout.Null { timeout = util.MultiGetenvOrDefault([]string{"EC_TIMEOUT"}, defaultTimeout.String()) - } else { - timeout = config.Timeout.Value } - var verbose bool + verbose := config.Verbose.Value + if config.Verbose.Null { verboseStr := util.MultiGetenvOrDefault([]string{"EC_VERBOSE"}, "") + + var err error + if verbose, err = util.StringToBool(verboseStr); err != nil { resp.Diagnostics.AddError( "Unable to create client", - fmt.Sprintf("Invalid value %v for verbose", verboseStr), + fmt.Sprintf("Invalid value '%v' in 'EC_VERBOSE'", verboseStr), ) return } - } else { - verbose = config.Verbose.Value } - var verboseCredentials bool + verboseCredentials := 
config.VerboseCredentials.Value + if config.VerboseCredentials.Null { verboseCredentialsStr := util.MultiGetenvOrDefault([]string{"EC_VERBOSE_CREDENTIALS"}, "") + + var err error + if verboseCredentials, err = util.StringToBool(verboseCredentialsStr); err != nil { resp.Diagnostics.AddError( "Unable to create client", - fmt.Sprintf("Invalid value %v for verboseCredentials", verboseCredentialsStr), + fmt.Sprintf("Invalid value '%v' in 'EC_VERBOSE_CREDENTIALS'", verboseCredentialsStr), ) return } - } else { - verboseCredentials = config.VerboseCredentials.Value } - var verboseFile string + verboseFile := config.VerboseFile.Value + if config.VerboseFile.Null { verboseFile = util.MultiGetenvOrDefault([]string{"EC_VERBOSE_FILE"}, "request.log") - } else { - verboseFile = config.VerboseFile.Value } cfg, err := newAPIConfig(apiSetup{ @@ -312,6 +306,7 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, } client, err := api.NewAPI(cfg) + if err != nil { resp.Diagnostics.AddError( "Unable to create api Client config", @@ -324,3 +319,16 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, resp.DataSourceData = client resp.ResourceData = client } + +func validateEndpoint(ctx context.Context, endpoint string) diag.Diagnostics { + validateReq := tfsdk.ValidateAttributeRequest{ + AttributePath: path.Root("endpoint"), + AttributeConfig: types.String{Value: endpoint}, + } + + validateResp := tfsdk.ValidateAttributeResponse{} + + validators.IsURLWithSchemeValidator(validURLSchemes).Validate(ctx, validateReq, &validateResp) + + return validateResp.Diagnostics +} diff --git a/ec/provider_test.go b/ec/provider_test.go index 700ac8262..28a51e5f8 100644 --- a/ec/provider_test.go +++ b/ec/provider_test.go @@ -57,6 +57,7 @@ func Test_Configure(t *testing.T) { return diags }(), }, + { name: `provider config and env vars don't define either api key or user login/passwords`, args: args{ @@ -74,6 +75,64 @@ func Test_Configure(t 
*testing.T) { return diags }(), }, + + { + name: `provider config doesn't define "insecure" and "EC_INSECURE" contains invalid value`, + args: args{ + env: map[string]string{ + "EC_INSECURE": "invalid", + }, + config: providerConfig{ + Endpoint: types.String{Value: "https://cloud.elastic.co/api"}, + ApiKey: types.String{Value: "secret"}, + Insecure: types.Bool{Null: true}, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("Unable to create client", "Invalid value 'invalid' in 'EC_INSECURE' or 'EC_SKIP_TLS_VALIDATION'") + return diags + }(), + }, + + { + name: `provider config doesn't define "verbose" and "EC_VERBOSE" contains invalid value`, + args: args{ + env: map[string]string{ + "EC_VERBOSE": "invalid", + }, + config: providerConfig{ + Endpoint: types.String{Value: "https://cloud.elastic.co/api"}, + ApiKey: types.String{Value: "secret"}, + Verbose: types.Bool{Null: true}, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("Unable to create client", "Invalid value 'invalid' in 'EC_VERBOSE'") + return diags + }(), + }, + + { + name: `provider config doesn't define "verbose" and "EC_VERBOSE_CREDENTIALS" contains invalid value`, + args: args{ + env: map[string]string{ + "EC_VERBOSE_CREDENTIALS": "invalid", + }, + config: providerConfig{ + Endpoint: types.String{Value: "https://cloud.elastic.co/api"}, + ApiKey: types.String{Value: "secret"}, + VerboseCredentials: types.Bool{Null: true}, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("Unable to create client", "Invalid value 'invalid' in 'EC_VERBOSE_CREDENTIALS'") + return diags + }(), + }, + { name: `provider config is read from environment variables`, args: args{ From b0bda3bfaef5431aedec49f4e147ad0f9b1cf656 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 21 Dec 2022 15:47:41 +0100 Subject: [PATCH 031/104] Refactor provider + address PR comments --- ec/provider.go | 33 
+++++++++++++++++---------------- ec/provider_config.go | 11 +++-------- ec/provider_config_test.go | 27 ++++++++------------------- ec/provider_test.go | 7 ------- 4 files changed, 28 insertions(+), 50 deletions(-) diff --git a/ec/provider.go b/ec/provider.go index 474516ad4..c09717488 100644 --- a/ec/provider.go +++ b/ec/provider.go @@ -195,7 +195,7 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, endpoint := config.Endpoint.Value - if config.Endpoint.Null { + if config.Endpoint.Null || config.Endpoint.Value == "" { endpoint = util.MultiGetenvOrDefault([]string{"EC_ENDPOINT", "EC_HOST"}, api.ESSEndpoint) diags := validateEndpoint(ctx, endpoint) @@ -209,29 +209,40 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, apiKey := config.ApiKey.Value - if config.ApiKey.Null { + if config.ApiKey.Null || config.ApiKey.Value == "" { apiKey = util.MultiGetenvOrDefault([]string{"EC_API_KEY"}, "") } username := config.Username.Value - if config.Username.Null { + if config.Username.Null || config.Username.Value == "" { username = util.MultiGetenvOrDefault([]string{"EC_USER", "EC_USERNAME"}, "") } password := config.Password.Value - if config.Password.Null { + if config.Password.Null || config.Password.Value == "" { password = util.MultiGetenvOrDefault([]string{"EC_PASS", "EC_PASSWORD"}, "") } + timeoutStr := config.Timeout.Value + + if config.Timeout.Null || config.Timeout.Value == "" { + timeoutStr = util.MultiGetenvOrDefault([]string{"EC_TIMEOUT"}, defaultTimeout.String()) + } + + timeout, err := time.ParseDuration(timeoutStr) + + if err != nil { + resp.Diagnostics.AddError("Unable to create client", err.Error()) + return + } + insecure := config.Insecure.Value if config.Insecure.Null { insecureStr := util.MultiGetenvOrDefault([]string{"EC_INSECURE", "EC_SKIP_TLS_VALIDATION"}, "") - var err error - if insecure, err = util.StringToBool(insecureStr); err != nil { resp.Diagnostics.AddError( "Unable to create 
client", @@ -241,19 +252,11 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, } } - timeout := config.Timeout.Value - - if config.Timeout.Null { - timeout = util.MultiGetenvOrDefault([]string{"EC_TIMEOUT"}, defaultTimeout.String()) - } - verbose := config.Verbose.Value if config.Verbose.Null { verboseStr := util.MultiGetenvOrDefault([]string{"EC_VERBOSE"}, "") - var err error - if verbose, err = util.StringToBool(verboseStr); err != nil { resp.Diagnostics.AddError( "Unable to create client", @@ -268,8 +271,6 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, if config.VerboseCredentials.Null { verboseCredentialsStr := util.MultiGetenvOrDefault([]string{"EC_VERBOSE_CREDENTIALS"}, "") - var err error - if verboseCredentials, err = util.StringToBool(verboseCredentialsStr); err != nil { resp.Diagnostics.AddError( "Unable to create client", diff --git a/ec/provider_config.go b/ec/provider_config.go index 175d4b45c..8a5f21cbf 100644 --- a/ec/provider_config.go +++ b/ec/provider_config.go @@ -32,7 +32,7 @@ const ( ) var ( - // DefaultHTTPRetries to use for the provider's HTTP Client. + // DefaultHTTPRetries to use for the provider's HTTP client. 
DefaultHTTPRetries = 2 ) @@ -42,7 +42,7 @@ type apiSetup struct { username string password string insecure bool - timeout string + timeout time.Duration verbose bool verboseCredentials bool verboseFile string @@ -61,11 +61,6 @@ func newAPIConfig(setup apiSetup) (api.Config, error) { return cfg, err } - timeoutDuration, err := time.ParseDuration(setup.timeout) - if err != nil { - return cfg, err - } - verboseCfg, err := verboseSettings( setup.verboseFile, setup.verbose, @@ -82,7 +77,7 @@ func newAPIConfig(setup apiSetup) (api.Config, error) { AuthWriter: authWriter, Host: setup.endpoint, SkipTLSVerify: setup.insecure, - Timeout: timeoutDuration, + Timeout: setup.timeout, UserAgent: userAgent(Version), Retries: DefaultHTTPRetries, }, nil diff --git a/ec/provider_config_test.go b/ec/provider_config_test.go index c3b23c343..10d14760a 100644 --- a/ec/provider_config_test.go +++ b/ec/provider_config_test.go @@ -138,7 +138,7 @@ func Test_newAPIConfig(t *testing.T) { name: "default config returns with authwriter error", args: args{ apiSetup: apiSetup{ - timeout: defaultTimeout.String(), + timeout: defaultTimeout, }, }, err: multierror.NewPrefixed("authwriter", @@ -146,23 +146,12 @@ func Test_newAPIConfig(t *testing.T) { ), }, - { - name: "default config with invalid timeout returns with authwriter error", - args: args{ - apiSetup: apiSetup{ - timeout: "invalid", - apikey: "secret", - }, - }, - err: errors.New(`time: invalid duration "invalid"`), - }, - { name: "custom config with apikey auth succeeds", args: args{ apiSetup: apiSetup{ apikey: "secret", - timeout: defaultTimeout.String(), + timeout: defaultTimeout, endpoint: api.ESSEndpoint, }, }, @@ -183,7 +172,7 @@ func Test_newAPIConfig(t *testing.T) { apiSetup: apiSetup{ username: "my-user", password: "my-pass", - timeout: defaultTimeout.String(), + timeout: defaultTimeout, endpoint: api.ESSEndpoint, }, }, @@ -204,7 +193,7 @@ func Test_newAPIConfig(t *testing.T) { apiSetup: apiSetup{ apikey: "secret", insecure: true, - 
timeout: defaultTimeout.String(), + timeout: defaultTimeout, endpoint: api.ESSEndpoint, }, }, @@ -227,7 +216,7 @@ func Test_newAPIConfig(t *testing.T) { apikey: "secret", verbose: true, verboseFile: "request.log", - timeout: defaultTimeout.String(), + timeout: defaultTimeout, endpoint: api.ESSEndpoint, }, }, @@ -254,7 +243,7 @@ func Test_newAPIConfig(t *testing.T) { apikey: "secret", verbose: true, verboseFile: customFile.Name(), - timeout: defaultTimeout.String(), + timeout: defaultTimeout, endpoint: api.ESSEndpoint, }, }, @@ -282,7 +271,7 @@ func Test_newAPIConfig(t *testing.T) { verbose: true, verboseFile: customFile.Name(), verboseCredentials: true, - timeout: defaultTimeout.String(), + timeout: defaultTimeout, endpoint: api.ESSEndpoint, }, }, @@ -310,7 +299,7 @@ func Test_newAPIConfig(t *testing.T) { verbose: true, verboseFile: invalidPath, verboseCredentials: true, - timeout: defaultTimeout.String(), + timeout: defaultTimeout, }, }, err: fmt.Errorf(`failed creating verbose file "%s": %w`, diff --git a/ec/provider_test.go b/ec/provider_test.go index 28a51e5f8..50192cfc5 100644 --- a/ec/provider_test.go +++ b/ec/provider_test.go @@ -47,9 +47,6 @@ func Test_Configure(t *testing.T) { env: map[string]string{ "EC_ENDPOINT": "invalid", }, - config: providerConfig{ - Endpoint: types.String{Null: true}, - }, }, diags: func() diag.Diagnostics { var diags diag.Diagnostics @@ -64,10 +61,6 @@ func Test_Configure(t *testing.T) { env: map[string]string{ "EC_ENDPOINT": "https://cloud.elastic.co/api", }, - config: providerConfig{ - Endpoint: types.String{Null: true}, - Username: types.String{Null: true}, - }, }, diags: func() diag.Diagnostics { var diags diag.Diagnostics From 99b37455e872008e19c5e49cdfd4915632aef24c Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 21 Dec 2022 17:03:14 +0100 Subject: [PATCH 032/104] address PR comments --- ec/internal/validators/urlvalidator.go | 2 +- ec/provider_config.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) 
diff --git a/ec/internal/validators/urlvalidator.go b/ec/internal/validators/urlvalidator.go index 8de0dbc78..981f0530e 100644 --- a/ec/internal/validators/urlvalidator.go +++ b/ec/internal/validators/urlvalidator.go @@ -91,7 +91,7 @@ func (v isURLWithSchemeValidator) Validate(ctx context.Context, req tfsdk.Valida resp.Diagnostics.AddAttributeError( req.AttributePath, v.Description(ctx), - fmt.Sprintf("URL is expected to have a valid scheme, got %v (%v)", u.Scheme, str.Value), + fmt.Sprintf("URL is expected to have a valid scheme (one of '%v'), got %v (%v)", v.ValidSchemes, u.Scheme, str.Value), ) } } diff --git a/ec/provider_config.go b/ec/provider_config.go index 8a5f21cbf..cd4d5007a 100644 --- a/ec/provider_config.go +++ b/ec/provider_config.go @@ -95,7 +95,7 @@ func verboseSettings(name string, verbose, redactAuth bool) (api.VerboseSettings } return api.VerboseSettings{ - Verbose: verbose, + Verbose: true, RedactAuth: redactAuth, Device: f, }, nil From 8c396b27bcdcb35759d5550d5fc6bffc65c9e17c Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 21 Dec 2022 17:32:08 +0100 Subject: [PATCH 033/104] use plan modifiers from Hashicorp where possible --- .../elasticsearch/v1/schema.go | 6 +- .../elasticsearch/v2/schema.go | 6 +- ec/internal/validators/length.go | 69 ------------------- ec/internal/validators/notempty.go | 58 ---------------- ec/internal/validators/oneOf.go | 64 ----------------- 5 files changed, 6 insertions(+), 197 deletions(-) delete mode 100644 ec/internal/validators/length.go delete mode 100644 ec/internal/validators/notempty.go delete mode 100644 ec/internal/validators/oneOf.go diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go index d6a6f3a70..7a9efc251 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go @@ -22,8 +22,8 @@ import ( "strings" 
"github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" - "github.com/elastic/terraform-provider-ec/ec/internal/validators" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" @@ -362,7 +362,7 @@ func ElasticsearchExtensionSchema() tfsdk.Attribute { Description: "Extension type, only `bundle` or `plugin` are supported.", Type: types.StringType, Required: true, - Validators: []tfsdk.AttributeValidator{validators.OneOf([]string{`"bundle"`, `"plugin"`})}, + Validators: []tfsdk.AttributeValidator{stringvalidator.OneOf("bundle", "plugin")}, }, "version": { Description: "Elasticsearch compatibility version. Bundles should specify major or minor versions with wildcards, such as `7.*` or `*` but **plugins must use full version notation down to the patch level**, such as `7.10.1` and wildcards are not allowed.", @@ -448,7 +448,7 @@ func ElasticsearchStrategySchema() tfsdk.Attribute { Description: "Configuration strategy type " + strings.Join(strategiesList, ", "), Type: types.StringType, Required: true, - Validators: []tfsdk.AttributeValidator{validators.OneOf(strategiesList)}, + Validators: []tfsdk.AttributeValidator{stringvalidator.OneOf("bundle", "plugin")}, // TODO // changes on this setting do not change the plan. 
// DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go index 14388d0a7..ef67b4995 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -21,7 +21,7 @@ import ( "strings" "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" - "github.com/elastic/terraform-provider-ec/ec/internal/validators" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" @@ -120,7 +120,7 @@ func ElasticsearchSchema() tfsdk.Attribute { Description: "Configuration strategy type " + strings.Join(strategiesList, ", "), Type: types.StringType, Optional: true, - Validators: []tfsdk.AttributeValidator{validators.OneOf(strategiesList)}, + Validators: []tfsdk.AttributeValidator{stringvalidator.OneOf("bundle", "plugin")}, }, }), } @@ -301,7 +301,7 @@ func ElasticsearchExtensionSchema() tfsdk.Attribute { Description: "Extension type, only `bundle` or `plugin` are supported.", Type: types.StringType, Required: true, - Validators: []tfsdk.AttributeValidator{validators.OneOf([]string{`"bundle"`, `"plugin"`})}, + Validators: []tfsdk.AttributeValidator{stringvalidator.OneOf("bundle", "plugin")}, }, "version": { Description: "Elasticsearch compatibility version. 
Bundles should specify major or minor versions with wildcards, such as `7.*` or `*` but **plugins must use full version notation down to the patch level**, such as `7.10.1` and wildcards are not allowed.", diff --git a/ec/internal/validators/length.go b/ec/internal/validators/length.go deleted file mode 100644 index 4e9bf1659..000000000 --- a/ec/internal/validators/length.go +++ /dev/null @@ -1,69 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package validators - -import ( - "context" - "fmt" - - "github.com/hashicorp/terraform-plugin-framework/tfsdk" -) - -type lengthValidator struct { - min int - max int -} - -// Description returns a plain text description of the validator's behavior, suitable for a practitioner to understand its impact. -func (v lengthValidator) Description(ctx context.Context) string { - return "Value must not be empty" -} - -// MarkdownDescription returns a markdown formatted description of the validator's behavior, suitable for a practitioner to understand its impact. 
-func (v lengthValidator) MarkdownDescription(ctx context.Context) string { - return v.Description(ctx) -} - -// Validate runs the main validation logic of the validator, reading configuration data out of `req` and updating `resp` with diagnostics. -func (v lengthValidator) Validate(ctx context.Context, req tfsdk.ValidateAttributeRequest, resp *tfsdk.ValidateAttributeResponse) { - if req.AttributeConfig.IsUnknown() || req.AttributeConfig.IsNull() { - resp.Diagnostics.AddAttributeError( - req.AttributePath, - v.Description(ctx), - "Value must be set", - ) - return - } - - if length := len(req.AttributeConfig.String()); length < v.min || length > v.max { - resp.Diagnostics.AddAttributeError( - req.AttributePath, - v.Description(ctx), - fmt.Sprintf("Length should be between %d and %d", v.min, v.max), - ) - return - } -} - -// Length returns an AttributeValidator which ensures that any configured -// attribute value: -// -// - Has a length between min and max. -func Length(min, max int) tfsdk.AttributeValidator { - return lengthValidator{min: min, max: max} -} diff --git a/ec/internal/validators/notempty.go b/ec/internal/validators/notempty.go deleted file mode 100644 index d7cd5feb4..000000000 --- a/ec/internal/validators/notempty.go +++ /dev/null @@ -1,58 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -package validators - -import ( - "context" - - "github.com/hashicorp/terraform-plugin-framework/tfsdk" -) - -type notEmptyValidator struct{} - -// Description returns a plain text description of the validator's behavior, suitable for a practitioner to understand its impact. -func (v notEmptyValidator) Description(ctx context.Context) string { - return "Value must not be empty" -} - -// MarkdownDescription returns a markdown formatted description of the validator's behavior, suitable for a practitioner to understand its impact. -func (v notEmptyValidator) MarkdownDescription(ctx context.Context) string { - return v.Description(ctx) -} - -// Validate runs the main validation logic of the validator, reading configuration data out of `req` and updating `resp` with diagnostics. -func (v notEmptyValidator) Validate(ctx context.Context, req tfsdk.ValidateAttributeRequest, resp *tfsdk.ValidateAttributeResponse) { - if req.AttributeConfig.IsUnknown() || req.AttributeConfig.IsNull() || req.AttributeConfig.String() == "" { - resp.Diagnostics.AddAttributeError( - req.AttributePath, - v.Description(ctx), - "Value must be set and not empty", - ) - return - } -} - -// NotEmpty returns an AttributeValidator which ensures that any configured -// attribute value: -// -// - Is known. -// - Is set. -// - Is no empty string. -func NotEmpty() tfsdk.AttributeValidator { - return notEmptyValidator{} -} diff --git a/ec/internal/validators/oneOf.go b/ec/internal/validators/oneOf.go deleted file mode 100644 index 74f864321..000000000 --- a/ec/internal/validators/oneOf.go +++ /dev/null @@ -1,64 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package validators - -import ( - "context" - "fmt" - - "github.com/elastic/cloud-sdk-go/pkg/util/slice" - "github.com/hashicorp/terraform-plugin-framework/tfsdk" -) - -type oneOf struct { - values []string -} - -// Description returns a plain text description of the validator's behavior, suitable for a practitioner to understand its impact. -func (v oneOf) Description(ctx context.Context) string { - return "Value must not be empty" -} - -// MarkdownDescription returns a markdown formatted description of the validator's behavior, suitable for a practitioner to understand its impact. -func (v oneOf) MarkdownDescription(ctx context.Context) string { - return v.Description(ctx) -} - -// Validate runs the main validation logic of the validator, reading configuration data out of `req` and updating `resp` with diagnostics. 
-func (v oneOf) Validate(ctx context.Context, req tfsdk.ValidateAttributeRequest, resp *tfsdk.ValidateAttributeResponse) { - if req.AttributeConfig.IsNull() || req.AttributeConfig.IsUnknown() { - return - } - - if value := req.AttributeConfig.String(); !slice.HasString(v.values, value) { - resp.Diagnostics.AddAttributeError( - req.AttributePath, - v.Description(ctx), - fmt.Sprintf("%v - invalid extension type %s: accepted values are %v", req.AttributePath, value, v.values), - ) - return - } -} - -// OneOf returns an AttributeValidator which ensures that any configured -// attribute value: -// -// - Is one of the accepted values. -func OneOf(values []string) tfsdk.AttributeValidator { - return oneOf{values: values} -} From 0df8f99ae8765ea80bb4efc488945c63ee76c2e0 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 21 Dec 2022 18:08:42 +0100 Subject: [PATCH 034/104] refactor ExtractEndpointsTF --- ec/internal/converters/extract_endpoint.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/ec/internal/converters/extract_endpoint.go b/ec/internal/converters/extract_endpoint.go index 2ae395fba..360c40a74 100644 --- a/ec/internal/converters/extract_endpoint.go +++ b/ec/internal/converters/extract_endpoint.go @@ -29,16 +29,14 @@ import ( // https endpoints and returns a map with two keys: `http_endpoint` and // `https_endpoint` func ExtractEndpointsTF(metadata *models.ClusterMetadataInfo) (httpEndpoint, httpsEndpoint types.String) { - if metadata == nil || metadata.Endpoint == "" || metadata.Ports == nil { - return - } + httpEndpointStr, httpsEndpointStr := ExtractEndpoints(metadata) - if metadata.Ports.HTTP != nil { - httpEndpoint = types.String{Value: fmt.Sprintf("http://%s:%d", metadata.Endpoint, *metadata.Ports.HTTP)} + if httpEndpointStr != nil { + httpEndpoint = types.String{Value: *httpEndpointStr} } - if metadata.Ports.HTTPS != nil { - httpsEndpoint = types.String{Value: fmt.Sprintf("https://%s:%d", metadata.Endpoint, 
*metadata.Ports.HTTPS)} + if httpsEndpointStr != nil { + httpsEndpoint = types.String{Value: *httpsEndpointStr} } return From f194f76a6a1a91f2ccf34d5604a38e59e42f2d6d Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 21 Dec 2022 18:23:44 +0100 Subject: [PATCH 035/104] conversion funcs renaming --- ec/ecdatasource/deploymentdatasource/datasource.go | 2 +- ec/ecresource/deploymentresource/apm/v2/apm_topology.go | 2 +- .../deploymentresource/deployment/v2/deployment.go | 6 +++--- .../elasticsearch/v2/elasticsearch_topology.go | 2 +- .../enterprisesearch/v2/enterprise_search_topology.go | 2 +- .../integrationsserver/v2/integrations_server_topology.go | 2 +- .../deploymentresource/kibana/v2/kibana_topology.go | 2 +- ec/internal/converters/convert_tags.go | 8 ++++---- ec/internal/converters/convert_tags_test.go | 2 +- ec/internal/converters/extract_endpoint.go | 2 +- ec/internal/converters/parse_topology_size.go | 2 +- 11 files changed, 16 insertions(+), 16 deletions(-) diff --git a/ec/ecdatasource/deploymentdatasource/datasource.go b/ec/ecdatasource/deploymentdatasource/datasource.go index d4768a708..fcfc540ab 100644 --- a/ec/ecdatasource/deploymentdatasource/datasource.go +++ b/ec/ecdatasource/deploymentdatasource/datasource.go @@ -129,7 +129,7 @@ func modelToState(ctx context.Context, res *models.DeploymentGetResponse, state diags.Append(flattenEnterpriseSearchResources(ctx, res.Resources.EnterpriseSearch, &state.EnterpriseSearch)...) 
if res.Metadata != nil { - state.Tags = converters.TagsToTypeMap(res.Metadata.Tags) + state.Tags = converters.ModelsTagsToTypesMap(res.Metadata.Tags) } return diags diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_topology.go b/ec/ecresource/deploymentresource/apm/v2/apm_topology.go index 444a9b527..35dde36ed 100644 --- a/ec/ecresource/deploymentresource/apm/v2/apm_topology.go +++ b/ec/ecresource/deploymentresource/apm/v2/apm_topology.go @@ -97,7 +97,7 @@ func apmTopologyPayload(ctx context.Context, topology v1.TopologyTF, planModels icID = planModels[index].InstanceConfigurationID } - size, err := converters.ParseTopologySizeTF(topology.Size, topology.SizeResource) + size, err := converters.ParseTopologySizeTypes(topology.Size, topology.SizeResource) var diags diag.Diagnostics if err != nil { diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment.go b/ec/ecresource/deploymentresource/deployment/v2/deployment.go index d4cdc0853..281070269 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment.go @@ -138,7 +138,7 @@ func ReadDeployment(res *models.DeploymentGetResponse, remotes *models.RemoteRes dep.Name = *res.Name if res.Metadata != nil { - dep.Tags = converters.TagsToMap(res.Metadata.Tags) + dep.Tags = converters.ModelsTagsToMap(res.Metadata.Tags) } if res.Resources == nil { @@ -296,7 +296,7 @@ func (dep DeploymentTF) CreateRequest(ctx context.Context, client *api.API) (*mo result.Settings.Observability = observabilityPayload - result.Metadata.Tags, diags = converters.TFmapToTags(ctx, dep.Tags) + result.Metadata.Tags, diags = converters.TypesMapToModelsTags(ctx, dep.Tags) if diags.HasError() { diagsnostics.Append(diags...) 
@@ -505,7 +505,7 @@ func (plan DeploymentTF) UpdateRequest(ctx context.Context, client *api.API, sta result.Settings.Observability = &models.DeploymentObservabilitySettings{} } - result.Metadata.Tags, diags = converters.TFmapToTags(ctx, plan.Tags) + result.Metadata.Tags, diags = converters.TypesMapToModelsTags(ctx, plan.Tags) if diags.HasError() { diagsnostics.Append(diags...) } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go index dba27a3cc..33769ea8f 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go @@ -81,7 +81,7 @@ func (topology ElasticsearchTopologyTF) Payload(ctx context.Context, topologyID return diags } - size, err := converters.ParseTopologySizeTF(topology.Size, topology.SizeResource) + size, err := converters.ParseTopologySizeTypes(topology.Size, topology.SizeResource) if err != nil { diags.AddError("size parsing error", err.Error()) } diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go index 1546ce7c8..9c22a6d3c 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go @@ -105,7 +105,7 @@ func enterpriseSearchTopologyPayload(ctx context.Context, topology v1.Enterprise return nil, diags } - size, err := converters.ParseTopologySizeTF(topology.Size, topology.SizeResource) + size, err := converters.ParseTopologySizeTypes(topology.Size, topology.SizeResource) if err != nil { diags.AddError("failed parse enterprise search topology size", err.Error()) diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_topology.go 
b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_topology.go index 5a4a728b3..0be4acdf8 100644 --- a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_topology.go +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_topology.go @@ -48,7 +48,7 @@ func integrationsServerTopologyPayload(ctx context.Context, topology topologyv1. var diags diag.Diagnostics - size, err := converters.ParseTopologySizeTF(topology.Size, topology.SizeResource) + size, err := converters.ParseTopologySizeTypes(topology.Size, topology.SizeResource) if err != nil { diags.AddError("parse topology error", err.Error()) return nil, diags diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana_topology.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_topology.go index 9e44cd7b2..aeda2d978 100644 --- a/ec/ecresource/deploymentresource/kibana/v2/kibana_topology.go +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_topology.go @@ -104,7 +104,7 @@ func kibanaTopologyPayload(ctx context.Context, topology v1.TopologyTF, planMode icID = planModels[index].InstanceConfigurationID } - size, err := converters.ParseTopologySizeTF(topology.Size, topology.SizeResource) + size, err := converters.ParseTopologySizeTypes(topology.Size, topology.SizeResource) var diags diag.Diagnostics if err != nil { diff --git a/ec/internal/converters/convert_tags.go b/ec/internal/converters/convert_tags.go index db5f787d9..9a4473b85 100644 --- a/ec/internal/converters/convert_tags.go +++ b/ec/internal/converters/convert_tags.go @@ -32,7 +32,7 @@ import ( // flattenTags takes in Deployment Metadata resource models and returns its // Tags in flattened form. 
-func TagsToTypeMap(metadataItems []*models.MetadataItem) types.Map { +func ModelsTagsToTypesMap(metadataItems []*models.MetadataItem) types.Map { var tags = make(map[string]attr.Value) for _, res := range metadataItems { if res.Key != nil { @@ -44,7 +44,7 @@ func TagsToTypeMap(metadataItems []*models.MetadataItem) types.Map { // flattenTags takes in Deployment Metadata resource models and returns its // Tags as Go map -func TagsToMap(metadataItems []*models.MetadataItem) map[string]string { +func ModelsTagsToMap(metadataItems []*models.MetadataItem) map[string]string { if len(metadataItems) == 0 { return nil } @@ -57,7 +57,7 @@ func TagsToMap(metadataItems []*models.MetadataItem) map[string]string { return res } -func MapToTags(raw map[string]string) []*models.MetadataItem { +func MapToModelsTags(raw map[string]string) []*models.MetadataItem { result := make([]*models.MetadataItem, 0, len(raw)) for k, v := range raw { result = append(result, &models.MetadataItem{ @@ -74,7 +74,7 @@ func MapToTags(raw map[string]string) []*models.MetadataItem { return result } -func TFmapToTags(ctx context.Context, raw types.Map) ([]*models.MetadataItem, diag.Diagnostics) { +func TypesMapToModelsTags(ctx context.Context, raw types.Map) ([]*models.MetadataItem, diag.Diagnostics) { result := make([]*models.MetadataItem, 0, len(raw.Elems)) for k, v := range raw.Elems { var tag string diff --git a/ec/internal/converters/convert_tags_test.go b/ec/internal/converters/convert_tags_test.go index 535f03433..4a19875b6 100644 --- a/ec/internal/converters/convert_tags_test.go +++ b/ec/internal/converters/convert_tags_test.go @@ -72,7 +72,7 @@ func TestFlattenTags(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := TagsToTypeMap(tt.args.metadata.Tags) + result := ModelsTagsToTypesMap(tt.args.metadata.Tags) got := make(map[string]string, len(result.Elems)) result.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) diff --git 
a/ec/internal/converters/extract_endpoint.go b/ec/internal/converters/extract_endpoint.go index 360c40a74..32077952c 100644 --- a/ec/internal/converters/extract_endpoint.go +++ b/ec/internal/converters/extract_endpoint.go @@ -28,7 +28,7 @@ import ( // FlattenClusterEndpoint receives a ClusterMetadataInfo, parses the http and // https endpoints and returns a map with two keys: `http_endpoint` and // `https_endpoint` -func ExtractEndpointsTF(metadata *models.ClusterMetadataInfo) (httpEndpoint, httpsEndpoint types.String) { +func ExtractEndpointsToTypes(metadata *models.ClusterMetadataInfo) (httpEndpoint, httpsEndpoint types.String) { httpEndpointStr, httpsEndpointStr := ExtractEndpoints(metadata) if httpEndpointStr != nil { diff --git a/ec/internal/converters/parse_topology_size.go b/ec/internal/converters/parse_topology_size.go index 3927ecf71..a52f8159b 100644 --- a/ec/internal/converters/parse_topology_size.go +++ b/ec/internal/converters/parse_topology_size.go @@ -24,7 +24,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) -func ParseTopologySizeTF(size, sizeResource types.String) (*models.TopologySize, error) { +func ParseTopologySizeTypes(size, sizeResource types.String) (*models.TopologySize, error) { return ParseTopologySize(&size.Value, &sizeResource.Value) } From 06758635fe33b8214244e1bdc0567c5d258c1be0 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 21 Dec 2022 18:32:40 +0100 Subject: [PATCH 036/104] remove obsolete comment --- ec/ecdatasource/deploymentdatasource/datasource.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/ec/ecdatasource/deploymentdatasource/datasource.go b/ec/ecdatasource/deploymentdatasource/datasource.go index fcfc540ab..a38fa8840 100644 --- a/ec/ecdatasource/deploymentdatasource/datasource.go +++ b/ec/ecdatasource/deploymentdatasource/datasource.go @@ -96,14 +96,6 @@ func (d DataSource) Read(ctx context.Context, request datasource.ReadRequest, re 
response.Diagnostics.Append(response.State.Set(ctx, newState)...) } -/* - TODO - see https://github.com/multani/terraform-provider-camunda/pull/16/files - - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(5 * time.Minute), - }, -*/ - func modelToState(ctx context.Context, res *models.DeploymentGetResponse, state *modelV0) diag.Diagnostics { var diags diag.Diagnostics From 4fa42b2af95e38ceeab56581c5763e7390930069 Mon Sep 17 00:00:00 2001 From: Dmitry Onishchenko <8962171+dimuon@users.noreply.github.com> Date: Thu, 22 Dec 2022 12:25:24 +0100 Subject: [PATCH 037/104] Apply suggestions from code review Co-authored-by: Toby Brain --- .../deploymentdatasource/schema.go | 4 ++-- .../deploymentdatasource/schema_apm.go | 6 +++--- .../schema_elasticsearch.go | 20 +++++++++---------- .../schema_enterprise_search.go | 4 ++-- .../schema_integrations_server.go | 4 ++-- .../deploymentdatasource/schema_kibana.go | 2 +- .../schema_observability.go | 4 ++-- .../deploymentsdatasource/schema.go | 8 ++++---- ec/ecdatasource/stackdatasource/schema.go | 8 ++++---- .../deploymentresource/apm/v1/schema.go | 2 +- .../deploymentresource/apm/v2/apm.go | 3 --- .../deploymentresource/apm/v2/apm_config.go | 6 ++---- .../deploymentresource/apm/v2/schema.go | 2 +- ec/ecresource/deploymentresource/create.go | 2 +- .../deployment/v1/schema.go | 14 ++++++------- .../deployment/v2/schema.go | 16 +++++++-------- .../elasticsearch/v1/schema.go | 2 +- 17 files changed, 51 insertions(+), 56 deletions(-) diff --git a/ec/ecdatasource/deploymentdatasource/schema.go b/ec/ecdatasource/deploymentdatasource/schema.go index 3d5f8732b..acbfe4f3e 100644 --- a/ec/ecdatasource/deploymentdatasource/schema.go +++ b/ec/ecdatasource/deploymentdatasource/schema.go @@ -50,12 +50,12 @@ func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnost }, "region": { Type: types.StringType, - Description: "Region where the deployment can be found.", + Description: "Region where the 
deployment is hosted.", Computed: true, }, "deployment_template_id": { Type: types.StringType, - Description: "ID of the deployment template used to create the deployment.", + Description: "ID of the deployment template this deployment is based off.", Computed: true, }, "traffic_filter": { diff --git a/ec/ecdatasource/deploymentdatasource/schema_apm.go b/ec/ecdatasource/deploymentdatasource/schema_apm.go index 9fda9fdf7..8dcb10ed1 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_apm.go +++ b/ec/ecdatasource/deploymentdatasource/schema_apm.go @@ -32,7 +32,7 @@ func apmResourceInfoSchema() tfsdk.Attribute { Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ "elasticsearch_cluster_ref_id": { Type: types.StringType, - Description: "The user-specified ID of the Elasticsearch cluster to which this resource kind will link.", + Description: "The locally-unique user-specified id of an Elasticsearch Resource.", Computed: true, }, "healthy": { @@ -52,7 +52,7 @@ func apmResourceInfoSchema() tfsdk.Attribute { }, "ref_id": { Type: types.StringType, - Description: "User specified ref_id for the resource kind.", + Description: "A locally-unique friendly alias for this APM resource.", Computed: true, }, "resource_id": { @@ -92,7 +92,7 @@ func apmTopologySchema() tfsdk.Attribute { }, "size": { Type: types.StringType, - Description: "Amount of resource per topology element in the \"g\" notation.", + Description: "Amount of size_resource in Gigabytes. 
For example \"4g\".", Computed: true, }, "size_resource": { diff --git a/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go b/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go index 38eacacb1..e79e0cb6c 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go +++ b/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go @@ -42,8 +42,8 @@ func elasticsearchResourceInfoSchema() tfsdk.Attribute { }, "cloud_id": { Type: types.StringType, - Description: "The encoded Elasticsearch credentials to use in Beats or Logstash.", - MarkdownDescription: "The encoded Elasticsearch credentials to use in Beats or Logstash. See [Configure Beats and Logstash with Cloud ID](https://www.elastic.co/guide/en/cloud/current/ec-cloud-id.html) for more information.", + Description: "The cloud ID, an encoded string that provides other Elastic services with the necessary information to connect to this Elasticsearch and Kibana.", + MarkdownDescription: "The cloud ID, an encoded string that provides other Elastic services with the necessary information to connect to this Elasticsearch and Kibana. 
See [Configure Beats and Logstash with Cloud ID](https://www.elastic.co/guide/en/cloud/current/ec-cloud-id.html) for more information.", Computed: true, }, "http_endpoint": { @@ -58,7 +58,7 @@ func elasticsearchResourceInfoSchema() tfsdk.Attribute { }, "ref_id": { Type: types.StringType, - Description: "User specified ref_id for the resource kind.", + Description: "A locally-unique friendly alias for this Elasticsearch cluster.", Computed: true, }, "resource_id": { @@ -113,7 +113,7 @@ func elasticsearchTopologySchema() tfsdk.Attribute { }, "node_type_data": { Type: types.BoolType, - Description: "Defines whether this node can hold data (<7.10.0).", + Description: "Defines whether this node can hold data (<8.0).", Computed: true, }, "node_type_master": { @@ -133,7 +133,7 @@ func elasticsearchTopologySchema() tfsdk.Attribute { }, "node_roles": { Type: types.SetType{ElemType: types.StringType}, - Description: "Defines the list of Elasticsearch node roles assigned to the topology element (>=7.10.0).", + Description: "Defines the list of Elasticsearch node roles assigned to the topology element. 
This is supported from v7.10, and required from v8.", Computed: true, }, "autoscaling": elasticsearchAutoscalingSchema(), @@ -153,27 +153,27 @@ func elasticsearchAutoscalingSchema() tfsdk.Attribute { Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ "max_size_resource": { Type: types.StringType, - Description: "Maximum resource type for the maximum autoscaling setting.", + Description: "Resource type used when specifying the maximum size the tier can scale up to.", Computed: true, }, "max_size": { Type: types.StringType, - Description: "Maximum size value for the maximum autoscaling setting.", + Description: "Maximum size the tier can scale up to, e.g \"64g\".", Computed: true, }, "min_size_resource": { Type: types.StringType, - Description: "Minimum resource type for the minimum autoscaling setting.", + Description: "Resource type used when specifying the minimum size the tier can scale down to when bidirectional autoscaling is supported.", Computed: true, }, "min_size": { Type: types.StringType, - Description: "Minimum size value for the minimum autoscaling setting.", + Description: "Minimum size the tier can scale down to when bidirectional autoscaling is supported.", Computed: true, }, "policy_override_json": { Type: types.StringType, - Description: "Computed policy overrides set directly via the API or other clients.", + Description: "An arbitrary JSON object overriding the default autoscaling policy. 
Don't set unless you really know what you are doing.", Computed: true, }, }), diff --git a/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go b/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go index 3f1b76e76..3dbb3e292 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go +++ b/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go @@ -32,7 +32,7 @@ func enterpriseSearchResourceInfoSchema() tfsdk.Attribute { Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ "elasticsearch_cluster_ref_id": { Type: types.StringType, - Description: "The user-specified ID of the Elasticsearch cluster to which this resource kind will link.", + Description: "A locally-unique friendly alias for an Elasticsearch resource in this deployment.", Computed: true, }, "healthy": { @@ -52,7 +52,7 @@ func enterpriseSearchResourceInfoSchema() tfsdk.Attribute { }, "ref_id": { Type: types.StringType, - Description: "User specified ref_id for the resource kind.", + Description: "A locally-unique friendly alias for this Enterprise Search resource.", Computed: true, }, "resource_id": { diff --git a/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go b/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go index 3fbbbd683..5314ba8ce 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go +++ b/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go @@ -32,7 +32,7 @@ func integrationsServerResourceInfoSchema() tfsdk.Attribute { Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ "elasticsearch_cluster_ref_id": { Type: types.StringType, - Description: "The user-specified ID of the Elasticsearch cluster to which this resource kind will link.", + Description: "A locally-unique friendly alias for an Elasticsearch resource in this deployment.", Computed: true, }, "healthy": { @@ -52,7 +52,7 @@ func integrationsServerResourceInfoSchema() tfsdk.Attribute { 
}, "ref_id": { Type: types.StringType, - Description: "User specified ref_id for the resource kind.", + Description: "A locally-unique friendly alias for this Integrations Server resource.", Computed: true, }, "resource_id": { diff --git a/ec/ecdatasource/deploymentdatasource/schema_kibana.go b/ec/ecdatasource/deploymentdatasource/schema_kibana.go index db37223bb..c127d659b 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_kibana.go +++ b/ec/ecdatasource/deploymentdatasource/schema_kibana.go @@ -52,7 +52,7 @@ func kibanaResourceInfoSchema() tfsdk.Attribute { }, "ref_id": { Type: types.StringType, - Description: "User specified ref_id for the resource kind.", + Description: "A locally-unique friendly alias for this Kibana resource.", Computed: true, }, "resource_id": { diff --git a/ec/ecdatasource/deploymentdatasource/schema_observability.go b/ec/ecdatasource/deploymentdatasource/schema_observability.go index c1ba83216..c4acf4471 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_observability.go +++ b/ec/ecdatasource/deploymentdatasource/schema_observability.go @@ -42,12 +42,12 @@ func observabilitySettingsSchema() tfsdk.Attribute { }, "logs": { Type: types.BoolType, - Description: "Defines whether logs are enabled or disabled.", + Description: "Defines whether logs are shipped to the destination deployment.", Computed: true, }, "metrics": { Type: types.BoolType, - Description: "Defines whether metrics are enabled or disabled.", + Description: "Defines whether metrics are shipped to the destination deployment.", Computed: true, }, }), diff --git a/ec/ecdatasource/deploymentsdatasource/schema.go b/ec/ecdatasource/deploymentsdatasource/schema.go index cd6bbd122..75611be84 100644 --- a/ec/ecdatasource/deploymentsdatasource/schema.go +++ b/ec/ecdatasource/deploymentsdatasource/schema.go @@ -44,22 +44,22 @@ func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnost Attributes: map[string]tfsdk.Attribute{ "name_prefix": { Type: 
types.StringType, - Description: "Prefix that one or several deployment names have in common.", + Description: "Prefix to filter the returned deployment list by.", Optional: true, }, "healthy": { Type: types.StringType, - Description: "Overall health status of the deployment.", + Description: "Filter the result set by their health status.", Optional: true, }, "deployment_template_id": { Type: types.StringType, - Description: "ID of the deployment template used to create the deployment.", + Description: "Filter the result set by the ID of the deployment template the deployment is based off.", Optional: true, }, "tags": { Type: types.MapType{ElemType: types.StringType}, - Description: "Key value map of arbitrary string tags for the deployment.\n", + Description: "Filter the result set by their assigned tags.", Optional: true, }, "size": { diff --git a/ec/ecdatasource/stackdatasource/schema.go b/ec/ecdatasource/stackdatasource/schema.go index b9e5773a8..58f98d279 100644 --- a/ec/ecdatasource/stackdatasource/schema.go +++ b/ec/ecdatasource/stackdatasource/schema.go @@ -99,12 +99,12 @@ func elasticSearchConfigSchema() tfsdk.Attribute { }, "capacity_constraints_max": { Type: types.Int64Type, - Description: "Minimum size of the instances.", + Description: "Maximum size of the instances.", Computed: true, }, "capacity_constraints_min": { Type: types.Int64Type, - Description: "Maximum size of the instances.", + Description: "Minimum size of the instances.", Computed: true, }, "compatible_node_types": { @@ -156,12 +156,12 @@ func resourceKindConfigSchema(resourceKind ResourceKind) tfsdk.Attribute { }, "capacity_constraints_max": { Type: types.Int64Type, - Description: "Minimum size of the instances.", + Description: "Maximum size of the instances.", Computed: true, }, "capacity_constraints_min": { Type: types.Int64Type, - Description: "Maximum size of the instances.", + Description: "Minimum size of the instances.", Computed: true, }, "compatible_node_types": { diff --git 
a/ec/ecresource/deploymentresource/apm/v1/schema.go b/ec/ecresource/deploymentresource/apm/v1/schema.go index b62b9102d..49877fafa 100644 --- a/ec/ecresource/deploymentresource/apm/v1/schema.go +++ b/ec/ecresource/deploymentresource/apm/v1/schema.go @@ -81,7 +81,7 @@ func ApmConfigSchema() tfsdk.Attribute { // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, "docker_image": { Type: types.StringType, - Description: "Optionally override the docker image the APM nodes will use. Note that this field will only work for internal users only.", + Description: "Optionally override the docker image the APM nodes will use. This option will not work in ESS customers and should only be changed if you know what you're doing.", Optional: true, }, "debug_enabled": { diff --git a/ec/ecresource/deploymentresource/apm/v2/apm.go b/ec/ecresource/deploymentresource/apm/v2/apm.go index cad680c1f..a30311d9c 100644 --- a/ec/ecresource/deploymentresource/apm/v2/apm.go +++ b/ec/ecresource/deploymentresource/apm/v2/apm.go @@ -80,11 +80,8 @@ func ReadApm(in *models.ApmResourceInfo) (*Apm, error) { var apm Apm apm.RefId = in.RefID - apm.ResourceId = in.Info.ID - apm.Region = in.Region - plan := in.Info.PlanInfo.Current.Plan topologies, err := ReadApmTopologies(plan.ClusterTopology) diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_config.go b/ec/ecresource/deploymentresource/apm/v2/apm_config.go index dc6dc197a..1fe8d6c6f 100644 --- a/ec/ecresource/deploymentresource/apm/v2/apm_config.go +++ b/ec/ecresource/deploymentresource/apm/v2/apm_config.go @@ -57,10 +57,8 @@ func readApmConfigs(in *models.ApmConfiguration) (v1.ApmConfigs, error) { cfg.DockerImage = &in.DockerImage } - if in.SystemSettings != nil { - if in.SystemSettings.DebugEnabled != nil { - cfg.DebugEnabled = in.SystemSettings.DebugEnabled - } + if in.SystemSettings != nil && in.SystemSettings.DebugEnabled != nil { + cfg.DebugEnabled = in.SystemSettings.DebugEnabled } if cfg == (ApmConfig{}) { diff --git 
a/ec/ecresource/deploymentresource/apm/v2/schema.go b/ec/ecresource/deploymentresource/apm/v2/schema.go index 3783d57ce..b52e2dbc5 100644 --- a/ec/ecresource/deploymentresource/apm/v2/schema.go +++ b/ec/ecresource/deploymentresource/apm/v2/schema.go @@ -33,7 +33,7 @@ func ApmConfigSchema() tfsdk.Attribute { // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, "docker_image": { Type: types.StringType, - Description: "Optionally override the docker image the APM nodes will use. Note that this field will only work for internal users only.", + Description: "Optionally override the docker image the APM nodes will use. This option will not work in ESS customers and should only be changed if you know what you're doing.", Optional: true, }, "debug_enabled": { diff --git a/ec/ecresource/deploymentresource/create.go b/ec/ecresource/deploymentresource/create.go index c63a19c05..447c3e6b9 100644 --- a/ec/ecresource/deploymentresource/create.go +++ b/ec/ecresource/deploymentresource/create.go @@ -76,7 +76,7 @@ func (r *Resource) Create(ctx context.Context, req resource.CreateRequest, resp return } - tflog.Trace(ctx, "created a resource") + tflog.Trace(ctx, "created deployment resource") resp.Diagnostics.Append(deploymentv.HandleRemoteClusters(ctx, r.client, *res.ID, plan.Elasticsearch)...) 
diff --git a/ec/ecresource/deploymentresource/deployment/v1/schema.go b/ec/ecresource/deploymentresource/deployment/v1/schema.go index 97d692fac..441a165a3 100644 --- a/ec/ecresource/deploymentresource/deployment/v1/schema.go +++ b/ec/ecresource/deploymentresource/deployment/v1/schema.go @@ -52,27 +52,27 @@ func DeploymentSchema() tfsdk.Schema { }, "version": { Type: types.StringType, - Description: "Required Elastic Stack version to use for all of the deployment resources", + Description: "Elastic Stack version to use for all of the deployment resources.", Required: true, }, "region": { Type: types.StringType, - Description: `Required ESS region where to create the deployment, for ECE environments "ece-region" must be set`, + Description: `Region when the deployment should be hosted. For ECE environments this should be set to "ece-region".`, Required: true, }, "deployment_template_id": { Type: types.StringType, - Description: "Required Deployment Template identifier to create the deployment from", + Description: "Deployment Template identifier to base the deployment from.", Required: true, }, "name": { Type: types.StringType, - Description: "Optional name for the deployment", + Description: "Name for the deployment.", Optional: true, }, "request_id": { Type: types.StringType, - Description: "Optional request_id to set on the create operation, only use when previous create attempts return with an error and a request_id is returned as part of the error", + Description: "request_id to set on the create operation, only used when a previous create attempt returns an error including a request_id.", Optional: true, Computed: true, PlanModifiers: tfsdk.AttributePlanModifiers{ @@ -81,12 +81,12 @@ func DeploymentSchema() tfsdk.Schema { }, "elasticsearch_username": { Type: types.StringType, - Description: "Computed username obtained upon creating the Elasticsearch resource", + Description: "Username for authenticating to the Elasticsearch resource.", Computed: true, }, 
"elasticsearch_password": { Type: types.StringType, - Description: "Computed password obtained upon creating the Elasticsearch resource", + Description: "Password for authenticating to the Elasticsearch resource", Computed: true, Sensitive: true, }, diff --git a/ec/ecresource/deploymentresource/deployment/v2/schema.go b/ec/ecresource/deploymentresource/deployment/v2/schema.go index 6918f1f8c..ce99fa796 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/schema.go +++ b/ec/ecresource/deploymentresource/deployment/v2/schema.go @@ -40,7 +40,7 @@ func DeploymentSchema() tfsdk.Schema { "id": { Type: types.StringType, Computed: true, - MarkdownDescription: "Unique identifier of this resource.", + MarkdownDescription: "Unique identifier of this deployment.", PlanModifiers: tfsdk.AttributePlanModifiers{ resource.UseStateForUnknown(), }, @@ -55,27 +55,27 @@ func DeploymentSchema() tfsdk.Schema { }, "version": { Type: types.StringType, - Description: "Required Elastic Stack version to use for all of the deployment resources", + Description: "Elastic Stack version to use for all of the deployment resources.", Required: true, }, "region": { Type: types.StringType, - Description: `Required ESS region where to create the deployment, for ECE environments "ece-region" must be set`, + Description: `Region when the deployment should be hosted. 
For ECE environments this should be set to "ece-region".`, Required: true, }, "deployment_template_id": { Type: types.StringType, - Description: "Required Deployment Template identifier to create the deployment from", + Description: "Deployment Template identifier to base the deployment from.", Required: true, }, "name": { Type: types.StringType, - Description: "Optional name for the deployment", + Description: "Name for the deployment", Optional: true, }, "request_id": { Type: types.StringType, - Description: "Optional request_id to set on the create operation, only use when previous create attempts return with an error and a request_id is returned as part of the error", + Description: "request_id to set on the create operation, only used when a previous create attempt returns an error including a request_id.", Optional: true, Computed: true, PlanModifiers: tfsdk.AttributePlanModifiers{ @@ -84,7 +84,7 @@ func DeploymentSchema() tfsdk.Schema { }, "elasticsearch_username": { Type: types.StringType, - Description: "Computed username obtained upon creating the Elasticsearch resource", + Description: "Username for authenticating to the Elasticsearch resource.", Computed: true, PlanModifiers: tfsdk.AttributePlanModifiers{ resource.UseStateForUnknown(), @@ -92,7 +92,7 @@ func DeploymentSchema() tfsdk.Schema { }, "elasticsearch_password": { Type: types.StringType, - Description: "Computed password obtained upon creating the Elasticsearch resource", + Description: "Password for authenticating to the Elasticsearch resource.", Computed: true, Sensitive: true, PlanModifiers: tfsdk.AttributePlanModifiers{ diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go index 7a9efc251..9c499eb45 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go @@ -133,7 +133,7 @@ func ElasticsearchConfigSchema() tfsdk.Attribute { // 
DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, "docker_image": { Type: types.StringType, - Description: "Optionally override the docker image the Elasticsearch nodes will use. Note that this field will only work for internal users only.", + Description: "Optionally override the docker image the Elasticsearch nodes will use. This option will not work in ESS customers and should only be changed if you know what you're doing.", Optional: true, }, "plugins": { From 0556078a9af86a82055369be36c1230b688b249c Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 22 Dec 2022 11:46:36 +0100 Subject: [PATCH 038/104] remove obsolete comments in v1 schemas --- ec/ecresource/deploymentresource/apm/v1/schema.go | 2 -- .../deploymentresource/elasticsearch/v1/schema.go | 7 ------- .../deploymentresource/enterprisesearch/v1/schema.go | 2 -- .../deploymentresource/integrationsserver/v1/schema.go | 2 -- ec/ecresource/deploymentresource/kibana/v1/schema.go | 4 +--- 5 files changed, 1 insertion(+), 16 deletions(-) diff --git a/ec/ecresource/deploymentresource/apm/v1/schema.go b/ec/ecresource/deploymentresource/apm/v1/schema.go index 49877fafa..c382bb7c6 100644 --- a/ec/ecresource/deploymentresource/apm/v1/schema.go +++ b/ec/ecresource/deploymentresource/apm/v1/schema.go @@ -77,8 +77,6 @@ func ApmConfigSchema() tfsdk.Attribute { Optional: true, Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ - // TODO - // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, "docker_image": { Type: types.StringType, Description: "Optionally override the docker image the APM nodes will use. 
This option will not work in ESS customers and should only be changed if you know what you're doing.", diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go index 9c499eb45..51bb78f0a 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go @@ -129,8 +129,6 @@ func ElasticsearchConfigSchema() tfsdk.Attribute { Optional: true, Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ - // TODO - // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, "docker_image": { Type: types.StringType, Description: "Optionally override the docker image the Elasticsearch nodes will use. This option will not work in ESS customers and should only be changed if you know what you're doing.", @@ -449,11 +447,6 @@ func ElasticsearchStrategySchema() tfsdk.Attribute { Type: types.StringType, Required: true, Validators: []tfsdk.AttributeValidator{stringvalidator.OneOf("bundle", "plugin")}, - // TODO - // changes on this setting do not change the plan. 
- // DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // return true - // }, }, }), } diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go b/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go index 6a7e89573..565faacaf 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go @@ -144,8 +144,6 @@ func EnterpriseSearchSchema() tfsdk.Attribute { Optional: true, Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ - // TODO - // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, "docker_image": { Type: types.StringType, Description: "Optionally override the docker image the Enterprise Search nodes will use. Note that this field will only work for internal users only.", diff --git a/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go b/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go index 4fbc8117a..df10c37ef 100644 --- a/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go +++ b/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go @@ -123,8 +123,6 @@ func IntegrationsServerSchema() tfsdk.Attribute { Optional: true, Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ - // TODO - // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, "docker_image": { Type: types.StringType, Description: "Optionally override the docker image the IntegrationsServer nodes will use. 
Note that this field will only work for internal users only.", diff --git a/ec/ecresource/deploymentresource/kibana/v1/schema.go b/ec/ecresource/deploymentresource/kibana/v1/schema.go index f95742771..8d775aae6 100644 --- a/ec/ecresource/deploymentresource/kibana/v1/schema.go +++ b/ec/ecresource/deploymentresource/kibana/v1/schema.go @@ -119,9 +119,7 @@ func KibanaSchema() tfsdk.Attribute { }), }, "config": { - Optional: true, - // TODO - // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + Optional: true, Description: `Optionally define the Kibana configuration options for the Kibana Server`, Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ From 7277f4194d5f0420cab9ec593d5eebe78b1e6779 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 22 Dec 2022 14:13:39 +0100 Subject: [PATCH 039/104] split apm and deployment in deploymentresource to read and payload files --- .../apm/v2/{apm.go => apm_payload.go} | 76 +-- .../deploymentresource/apm/v2/apm_read.go | 92 +++ .../deployment/v2/deployment.go | 566 ------------------ .../v2/deployment_create_payload.go | 232 +++++++ .../v2/deployment_parse_credentials_test.go | 10 +- .../deployment/v2/deployment_read.go | 241 ++++++++ .../v2/deployment_update_payload.go | 158 +++++ .../deployment/v2/traffic_filter_test.go | 4 +- ec/ecresource/deploymentresource/read.go | 2 +- 9 files changed, 731 insertions(+), 650 deletions(-) rename ec/ecresource/deploymentresource/apm/v2/{apm.go => apm_payload.go} (64%) create mode 100644 ec/ecresource/deploymentresource/apm/v2/apm_read.go delete mode 100644 ec/ecresource/deploymentresource/deployment/v2/deployment.go create mode 100644 ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload.go create mode 100644 ec/ecresource/deploymentresource/deployment/v2/deployment_read.go create mode 100644 ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload.go 
diff --git a/ec/ecresource/deploymentresource/apm/v2/apm.go b/ec/ecresource/deploymentresource/apm/v2/apm_payload.go similarity index 64% rename from ec/ecresource/deploymentresource/apm/v2/apm.go rename to ec/ecresource/deploymentresource/apm/v2/apm_payload.go index a30311d9c..54327326f 100644 --- a/ec/ecresource/deploymentresource/apm/v2/apm.go +++ b/ec/ecresource/deploymentresource/apm/v2/apm_payload.go @@ -23,9 +23,6 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v1" topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" - "github.com/elastic/terraform-provider-ec/ec/internal/converters" - "github.com/elastic/terraform-provider-ec/ec/internal/util" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" @@ -45,73 +42,6 @@ type ApmTF struct { Config types.Object `tfsdk:"config"` } -type Apm struct { - ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` - RefId *string `tfsdk:"ref_id"` - ResourceId *string `tfsdk:"resource_id"` - Region *string `tfsdk:"region"` - HttpEndpoint *string `tfsdk:"http_endpoint"` - HttpsEndpoint *string `tfsdk:"https_endpoint"` - InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` - Size *string `tfsdk:"size"` - SizeResource *string `tfsdk:"size_resource"` - ZoneCount int `tfsdk:"zone_count"` - Config *ApmConfig `tfsdk:"config"` -} - -func ReadApms(in []*models.ApmResourceInfo) (*Apm, error) { - for _, model := range in { - if util.IsCurrentApmPlanEmpty(model) || utils.IsApmResourceStopped(model) { - continue - } - - apm, err := ReadApm(model) - if err != nil { - return nil, err - } - - return apm, nil - } - - return nil, nil -} - -func ReadApm(in *models.ApmResourceInfo) 
(*Apm, error) { - var apm Apm - - apm.RefId = in.RefID - apm.ResourceId = in.Info.ID - apm.Region = in.Region - plan := in.Info.PlanInfo.Current.Plan - - topologies, err := ReadApmTopologies(plan.ClusterTopology) - if err != nil { - return nil, err - } - - if len(topologies) > 0 { - apm.InstanceConfigurationId = topologies[0].InstanceConfigurationId - apm.Size = topologies[0].Size - apm.SizeResource = topologies[0].SizeResource - apm.ZoneCount = topologies[0].ZoneCount - } - - apm.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID - - apm.HttpEndpoint, apm.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) - - configs, err := readApmConfigs(plan.Apm) - if err != nil { - return nil, err - } - - if len(configs) > 0 { - apm.Config = &configs[0] - } - - return &apm, nil -} - func (apm ApmTF) Payload(ctx context.Context, payload models.ApmPayload) (*models.ApmPayload, diag.Diagnostics) { var diags diag.Diagnostics @@ -170,7 +100,7 @@ func ApmPayload(ctx context.Context, apmObj types.Object, template *models.Deplo return nil, nil } - templatePayload := ApmResource(template) + templatePayload := payloadFromTemplate(template) if templatePayload == nil { diags.AddError("apm payload error", "apm specified but deployment template is not configured for it. Use a different template if you wish to add apm") @@ -186,9 +116,9 @@ func ApmPayload(ctx context.Context, apmObj types.Object, template *models.Deplo return payload, nil } -// ApmResource returns the ApmPayload from a deployment +// payloadFromTemplate returns the ApmPayload from a deployment // template or an empty version of the payload. 
-func ApmResource(template *models.DeploymentTemplateInfoV2) *models.ApmPayload { +func payloadFromTemplate(template *models.DeploymentTemplateInfoV2) *models.ApmPayload { if template == nil || len(template.DeploymentTemplate.Resources.Apm) == 0 { return nil } diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_read.go b/ec/ecresource/deploymentresource/apm/v2/apm_read.go new file mode 100644 index 000000000..d7750464f --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v2/apm_read.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" +) + +type Apm struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + Config *ApmConfig `tfsdk:"config"` +} + +func ReadApms(in []*models.ApmResourceInfo) (*Apm, error) { + for _, model := range in { + if util.IsCurrentApmPlanEmpty(model) || utils.IsApmResourceStopped(model) { + continue + } + + apm, err := ReadApm(model) + if err != nil { + return nil, err + } + + return apm, nil + } + + return nil, nil +} + +func ReadApm(in *models.ApmResourceInfo) (*Apm, error) { + var apm Apm + + apm.RefId = in.RefID + apm.ResourceId = in.Info.ID + apm.Region = in.Region + plan := in.Info.PlanInfo.Current.Plan + + topologies, err := ReadApmTopologies(plan.ClusterTopology) + if err != nil { + return nil, err + } + + if len(topologies) > 0 { + apm.InstanceConfigurationId = topologies[0].InstanceConfigurationId + apm.Size = topologies[0].Size + apm.SizeResource = topologies[0].SizeResource + apm.ZoneCount = topologies[0].ZoneCount + } + + apm.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID + + apm.HttpEndpoint, apm.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) + + configs, err := readApmConfigs(plan.Apm) + if err != nil { + return nil, err + } + + if len(configs) > 0 { + apm.Config = &configs[0] + } + + return &apm, nil +} diff --git 
a/ec/ecresource/deploymentresource/deployment/v2/deployment.go b/ec/ecresource/deploymentresource/deployment/v2/deployment.go deleted file mode 100644 index 281070269..000000000 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment.go +++ /dev/null @@ -1,566 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package v2 - -import ( - "context" - "fmt" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deptemplateapi" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/esremoteclustersapi" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" - elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" - enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" - integrationsserverv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v2" - kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" - observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" - "github.com/elastic/terraform-provider-ec/ec/internal/converters" - "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/tfsdk" - "github.com/hashicorp/terraform-plugin-framework/types" -) - -type DeploymentTF struct { - Id types.String `tfsdk:"id"` - Alias types.String `tfsdk:"alias"` - Version types.String `tfsdk:"version"` - Region types.String `tfsdk:"region"` - DeploymentTemplateId types.String `tfsdk:"deployment_template_id"` - Name types.String `tfsdk:"name"` - RequestId types.String `tfsdk:"request_id"` - ElasticsearchUsername types.String `tfsdk:"elasticsearch_username"` - ElasticsearchPassword types.String `tfsdk:"elasticsearch_password"` - ApmSecretToken types.String `tfsdk:"apm_secret_token"` - TrafficFilter types.Set `tfsdk:"traffic_filter"` - Tags types.Map `tfsdk:"tags"` - Elasticsearch types.Object `tfsdk:"elasticsearch"` - Kibana types.Object 
`tfsdk:"kibana"` - Apm types.Object `tfsdk:"apm"` - IntegrationsServer types.Object `tfsdk:"integrations_server"` - EnterpriseSearch types.Object `tfsdk:"enterprise_search"` - Observability types.Object `tfsdk:"observability"` -} - -type Deployment struct { - Id string `tfsdk:"id"` - Alias string `tfsdk:"alias"` - Version string `tfsdk:"version"` - Region string `tfsdk:"region"` - DeploymentTemplateId string `tfsdk:"deployment_template_id"` - Name string `tfsdk:"name"` - RequestId string `tfsdk:"request_id"` - ElasticsearchUsername string `tfsdk:"elasticsearch_username"` - ElasticsearchPassword string `tfsdk:"elasticsearch_password"` - ApmSecretToken *string `tfsdk:"apm_secret_token"` - TrafficFilter []string `tfsdk:"traffic_filter"` - Tags map[string]string `tfsdk:"tags"` - Elasticsearch *elasticsearchv2.Elasticsearch `tfsdk:"elasticsearch"` - Kibana *kibanav2.Kibana `tfsdk:"kibana"` - Apm *apmv2.Apm `tfsdk:"apm"` - IntegrationsServer *integrationsserverv2.IntegrationsServer `tfsdk:"integrations_server"` - EnterpriseSearch *enterprisesearchv2.EnterpriseSearch `tfsdk:"enterprise_search"` - Observability *observabilityv2.Observability `tfsdk:"observability"` -} - -// Nullify Elasticsearch topologies that have zero size and are not specified in plan -func (dep *Deployment) NullifyNotUsedEsTopologies(ctx context.Context, esPlan *elasticsearchv2.ElasticsearchTF) { - if dep.Elasticsearch == nil { - return - } - - if esPlan == nil { - return - } - - dep.Elasticsearch.HotTier = nullifyUnspecifiedZeroSizedTier(esPlan.HotContentTier, dep.Elasticsearch.HotTier) - - dep.Elasticsearch.WarmTier = nullifyUnspecifiedZeroSizedTier(esPlan.WarmTier, dep.Elasticsearch.WarmTier) - - dep.Elasticsearch.ColdTier = nullifyUnspecifiedZeroSizedTier(esPlan.ColdTier, dep.Elasticsearch.ColdTier) - - dep.Elasticsearch.FrozenTier = nullifyUnspecifiedZeroSizedTier(esPlan.FrozenTier, dep.Elasticsearch.FrozenTier) - - dep.Elasticsearch.MlTier = nullifyUnspecifiedZeroSizedTier(esPlan.MlTier, 
dep.Elasticsearch.MlTier) - - dep.Elasticsearch.MasterTier = nullifyUnspecifiedZeroSizedTier(esPlan.MasterTier, dep.Elasticsearch.MasterTier) - - dep.Elasticsearch.CoordinatingTier = nullifyUnspecifiedZeroSizedTier(esPlan.CoordinatingTier, dep.Elasticsearch.CoordinatingTier) -} - -func nullifyUnspecifiedZeroSizedTier(tierPlan types.Object, tier *elasticsearchv2.ElasticsearchTopology) *elasticsearchv2.ElasticsearchTopology { - - if tierPlan.IsNull() && tier != nil { - - size, err := converters.ParseTopologySize(tier.Size, tier.SizeResource) - - // we can ignore returning an error here - it's handled in readers - if err == nil && size != nil && size.Value != nil && *size.Value == 0 { - tier = nil - } - } - - return tier -} - -func ReadDeployment(res *models.DeploymentGetResponse, remotes *models.RemoteResources, deploymentResources []*models.DeploymentResource) (*Deployment, error) { - var dep Deployment - - if res.ID == nil { - return nil, utils.MissingField("ID") - } - dep.Id = *res.ID - - dep.Alias = res.Alias - - if res.Name == nil { - return nil, utils.MissingField("Name") - } - dep.Name = *res.Name - - if res.Metadata != nil { - dep.Tags = converters.ModelsTagsToMap(res.Metadata.Tags) - } - - if res.Resources == nil { - return nil, nil - } - - templateID, err := utils.GetDeploymentTemplateID(res.Resources) - if err != nil { - return nil, err - } - - dep.DeploymentTemplateId = templateID - - dep.Region = utils.GetRegion(res.Resources) - - // We're reconciling the version and storing the lowest version of any - // of the deployment resources. This ensures that if an upgrade fails, - // the state version will be lower than the desired version, making - // retries possible. Once more resource types are added, the function - // needs to be modified to check those as well. 
- version, err := utils.GetLowestVersion(res.Resources) - if err != nil { - // This code path is highly unlikely, but we're bubbling up the - // error in case one of the versions isn't parseable by semver. - return nil, fmt.Errorf("failed reading deployment: %w", err) - } - dep.Version = version - - dep.Elasticsearch, err = elasticsearchv2.ReadElasticsearches(res.Resources.Elasticsearch, remotes) - if err != nil { - return nil, err - } - - if dep.Kibana, err = kibanav2.ReadKibanas(res.Resources.Kibana); err != nil { - return nil, err - } - - if dep.Apm, err = apmv2.ReadApms(res.Resources.Apm); err != nil { - return nil, err - } - - if dep.IntegrationsServer, err = integrationsserverv2.ReadIntegrationsServers(res.Resources.IntegrationsServer); err != nil { - return nil, err - } - - if dep.EnterpriseSearch, err = enterprisesearchv2.ReadEnterpriseSearches(res.Resources.EnterpriseSearch); err != nil { - return nil, err - } - - if dep.TrafficFilter, err = ReadTrafficFilters(res.Settings); err != nil { - return nil, err - } - - if dep.Observability, err = observabilityv2.ReadObservability(res.Settings); err != nil { - return nil, err - } - - if err := dep.parseCredentials(deploymentResources); err != nil { - return nil, err - } - - return &dep, nil -} - -func (dep DeploymentTF) CreateRequest(ctx context.Context, client *api.API) (*models.DeploymentCreateRequest, diag.Diagnostics) { - var result = models.DeploymentCreateRequest{ - Name: dep.Name.Value, - Alias: dep.Alias.Value, - Resources: &models.DeploymentCreateResources{}, - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{}, - } - - dtID := dep.DeploymentTemplateId.Value - version := dep.Version.Value - - var diagsnostics diag.Diagnostics - - template, err := deptemplateapi.Get(deptemplateapi.GetParams{ - API: client, - TemplateID: dtID, - Region: dep.Region.Value, - HideInstanceConfigurations: true, - }) - if err != nil { - diagsnostics.AddError("Deployment template get 
error", err.Error()) - return nil, diagsnostics - } - - useNodeRoles, err := utils.CompatibleWithNodeRoles(version) - if err != nil { - diagsnostics.AddError("Deployment parse error", err.Error()) - return nil, diagsnostics - } - - elasticsearchPayload, diags := elasticsearchv2.ElasticsearchPayload(ctx, dep.Elasticsearch, template, dtID, version, useNodeRoles, false) - - if diags.HasError() { - diagsnostics.Append(diags...) - } - - if elasticsearchPayload != nil { - result.Resources.Elasticsearch = []*models.ElasticsearchPayload{elasticsearchPayload} - } - - kibanaPayload, diags := kibanav2.KibanaPayload(ctx, dep.Kibana, template) - - if diags.HasError() { - diagsnostics.Append(diags...) - } - - if kibanaPayload != nil { - result.Resources.Kibana = []*models.KibanaPayload{kibanaPayload} - } - - apmPayload, diags := apmv2.ApmPayload(ctx, dep.Apm, template) - - if diags.HasError() { - diagsnostics.Append(diags...) - } - - if apmPayload != nil { - result.Resources.Apm = []*models.ApmPayload{apmPayload} - } - - integrationsServerPayload, diags := integrationsserverv2.IntegrationsServerPayload(ctx, dep.IntegrationsServer, template) - - if diags.HasError() { - diagsnostics.Append(diags...) - } - - if integrationsServerPayload != nil { - result.Resources.IntegrationsServer = []*models.IntegrationsServerPayload{integrationsServerPayload} - } - - enterpriseSearchPayload, diags := enterprisesearchv2.EnterpriseSearchesPayload(ctx, dep.EnterpriseSearch, template) - - if diags.HasError() { - diagsnostics.Append(diags...) - } - - if enterpriseSearchPayload != nil { - result.Resources.EnterpriseSearch = []*models.EnterpriseSearchPayload{enterpriseSearchPayload} - } - - if diags := TrafficFilterToModel(ctx, dep.TrafficFilter, &result); diags.HasError() { - diagsnostics.Append(diags...) - } - - observabilityPayload, diags := observabilityv2.ObservabilityPayload(ctx, dep.Observability, client) - - if diags.HasError() { - diagsnostics.Append(diags...) 
- } - - result.Settings.Observability = observabilityPayload - - result.Metadata.Tags, diags = converters.TypesMapToModelsTags(ctx, dep.Tags) - - if diags.HasError() { - diagsnostics.Append(diags...) - } - - return &result, diagsnostics -} - -func ReadTrafficFilters(in *models.DeploymentSettings) ([]string, error) { - if in == nil || in.TrafficFilterSettings == nil || len(in.TrafficFilterSettings.Rulesets) == 0 { - return nil, nil - } - - var rules []string - - return append(rules, in.TrafficFilterSettings.Rulesets...), nil -} - -// TrafficFilterToModel expands the flattened "traffic_filter" settings to a DeploymentCreateRequest. -func TrafficFilterToModel(ctx context.Context, set types.Set, req *models.DeploymentCreateRequest) diag.Diagnostics { - if len(set.Elems) == 0 || req == nil { - return nil - } - - if req.Settings == nil { - req.Settings = &models.DeploymentCreateSettings{} - } - - if req.Settings.TrafficFilterSettings == nil { - req.Settings.TrafficFilterSettings = &models.TrafficFilterSettings{} - } - - var rulesets []string - if diags := tfsdk.ValueAs(ctx, set, &rulesets); diags.HasError() { - return diags - } - - req.Settings.TrafficFilterSettings.Rulesets = append( - req.Settings.TrafficFilterSettings.Rulesets, - rulesets..., - ) - - return nil -} - -// parseCredentials parses the Create or Update response Resources populating -// credential settings in the Terraform state if the keys are found, currently -// populates the following credentials in plain text: -// * Elasticsearch username and Password -func (dep *Deployment) parseCredentials(resources []*models.DeploymentResource) error { - for _, res := range resources { - - if creds := res.Credentials; creds != nil { - if creds.Username != nil && *creds.Username != "" { - dep.ElasticsearchUsername = *creds.Username - } - - if creds.Password != nil && *creds.Password != "" { - dep.ElasticsearchPassword = *creds.Password - } - } - - if res.SecretToken != "" { - dep.ApmSecretToken = &res.SecretToken - } 
- } - - return nil -} - -func (dep *Deployment) ProcessSelfInObservability() { - - if dep.Observability == nil { - return - } - - if dep.Observability.DeploymentId == nil { - return - } - - if *dep.Observability.DeploymentId == dep.Id { - *dep.Observability.DeploymentId = "self" - } -} - -func (dep *Deployment) SetCredentialsIfEmpty(state *DeploymentTF) { - if state == nil { - return - } - - if dep.ElasticsearchPassword == "" && state.ElasticsearchPassword.Value != "" { - dep.ElasticsearchPassword = state.ElasticsearchPassword.Value - } - - if dep.ElasticsearchUsername == "" && state.ElasticsearchUsername.Value != "" { - dep.ElasticsearchUsername = state.ElasticsearchUsername.Value - } - - if (dep.ApmSecretToken == nil || *dep.ApmSecretToken == "") && state.ApmSecretToken.Value != "" { - dep.ApmSecretToken = &state.ApmSecretToken.Value - } -} - -func (plan DeploymentTF) UpdateRequest(ctx context.Context, client *api.API, state DeploymentTF) (*models.DeploymentUpdateRequest, diag.Diagnostics) { - var result = models.DeploymentUpdateRequest{ - Name: plan.Name.Value, - Alias: plan.Alias.Value, - PruneOrphans: ec.Bool(true), - Resources: &models.DeploymentUpdateResources{}, - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{}, - } - - dtID := plan.DeploymentTemplateId.Value - - var diagsnostics diag.Diagnostics - - template, err := deptemplateapi.Get(deptemplateapi.GetParams{ - API: client, - TemplateID: dtID, - Region: plan.Region.Value, - HideInstanceConfigurations: true, - }) - if err != nil { - diagsnostics.AddError("Deployment template get error", err.Error()) - return nil, diagsnostics - } - - // When the deployment template is changed, we need to skip the missing - // resource topologies to account for a new instance_configuration_id and - // a different default value. 
- skipEStopologies := plan.DeploymentTemplateId.Value != "" && plan.DeploymentTemplateId.Value != state.DeploymentTemplateId.Value && state.DeploymentTemplateId.Value != "" - // If the deployment_template_id is changed, then we skip updating the - // Elasticsearch topology to account for the case where the - // instance_configuration_id changes, i.e. Hot / Warm, etc. - // This might not be necessary going forward as we move to - // tiered Elasticsearch nodes. - - useNodeRoles, diags := utils.UseNodeRoles(state.Version, plan.Version) - - if diags.HasError() { - return nil, diags - } - - elasticsearchPayload, diags := elasticsearchv2.ElasticsearchPayload(ctx, plan.Elasticsearch, template, dtID, plan.Version.Value, useNodeRoles, skipEStopologies) - - if diags.HasError() { - diagsnostics.Append(diags...) - } - - if elasticsearchPayload != nil { - // if the restore snapshot operation has been specified, the snapshot restore - // can't be full once the cluster has been created, so the Strategy must be set - // to "partial". - ensurePartialSnapshotStrategy(elasticsearchPayload) - - result.Resources.Elasticsearch = append(result.Resources.Elasticsearch, elasticsearchPayload) - } - - kibanaPayload, diags := kibanav2.KibanaPayload(ctx, plan.Kibana, template) - if diags.HasError() { - diagsnostics.Append(diags...) - } - - if kibanaPayload != nil { - result.Resources.Kibana = append(result.Resources.Kibana, kibanaPayload) - } - - apmPayload, diags := apmv2.ApmPayload(ctx, plan.Apm, template) - if diags.HasError() { - diagsnostics.Append(diags...) - } - - if apmPayload != nil { - result.Resources.Apm = append(result.Resources.Apm, apmPayload) - } - - integrationsServerPayload, diags := integrationsserverv2.IntegrationsServerPayload(ctx, plan.IntegrationsServer, template) - if diags.HasError() { - diagsnostics.Append(diags...) 
- } - - if integrationsServerPayload != nil { - result.Resources.IntegrationsServer = append(result.Resources.IntegrationsServer, integrationsServerPayload) - } - - enterpriseSearchPayload, diags := enterprisesearchv2.EnterpriseSearchesPayload(ctx, plan.EnterpriseSearch, template) - if diags.HasError() { - diagsnostics.Append(diags...) - } - - if enterpriseSearchPayload != nil { - result.Resources.EnterpriseSearch = append(result.Resources.EnterpriseSearch, enterpriseSearchPayload) - } - - observabilityPayload, diags := observabilityv2.ObservabilityPayload(ctx, plan.Observability, client) - if diags.HasError() { - diagsnostics.Append(diags...) - } - result.Settings.Observability = observabilityPayload - - // In order to stop shipping logs and metrics, an empty Observability - // object must be passed, as opposed to a nil object when creating a - // deployment without observability settings. - if plan.Observability.IsNull() && !state.Observability.IsNull() { - result.Settings.Observability = &models.DeploymentObservabilitySettings{} - } - - result.Metadata.Tags, diags = converters.TypesMapToModelsTags(ctx, plan.Tags) - if diags.HasError() { - diagsnostics.Append(diags...) 
- } - - return &result, diagsnostics -} - -func ensurePartialSnapshotStrategy(es *models.ElasticsearchPayload) { - transient := es.Plan.Transient - if transient == nil || transient.RestoreSnapshot == nil { - return - } - transient.RestoreSnapshot.Strategy = "partial" -} - -// func HandleRemoteClusters(ctx context.Context, client *api.API, newState, oldState DeploymentTF) diag.Diagnostics { -func HandleRemoteClusters(ctx context.Context, client *api.API, deploymentId string, esObj types.Object) diag.Diagnostics { - remoteClusters, refId, diags := ElasticsearchRemoteClustersPayload(ctx, client, deploymentId, esObj) - - if diags.HasError() { - return diags - } - - if err := esremoteclustersapi.Update(esremoteclustersapi.UpdateParams{ - API: client, - DeploymentID: deploymentId, - RefID: refId, - RemoteResources: remoteClusters, - }); err != nil { - diags.AddError("cannot update remote clusters", err.Error()) - return diags - } - - return nil -} - -func ElasticsearchRemoteClustersPayload(ctx context.Context, client *api.API, deploymentId string, esObj types.Object) (*models.RemoteResources, string, diag.Diagnostics) { - var es *elasticsearchv2.ElasticsearchTF - - diags := tfsdk.ValueAs(ctx, esObj, &es) - - if diags.HasError() { - return nil, "", diags - } - - if es == nil { - var diags diag.Diagnostics - diags.AddError("failed create remote clusters payload", "there is no elasticsearch") - return nil, "", diags - } - - remoteRes, diags := elasticsearchv2.ElasticsearchRemoteClustersPayload(ctx, es.RemoteCluster) - if diags.HasError() { - return nil, "", diags - } - - return remoteRes, es.RefId.Value, nil -} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload.go new file mode 100644 index 000000000..fb8fc1d35 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload.go @@ -0,0 +1,232 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deptemplateapi" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/esremoteclustersapi" + "github.com/elastic/cloud-sdk-go/pkg/models" + + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + integrationsserverv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + 
"github.com/hashicorp/terraform-plugin-framework/types" +) + +type DeploymentTF struct { + Id types.String `tfsdk:"id"` + Alias types.String `tfsdk:"alias"` + Version types.String `tfsdk:"version"` + Region types.String `tfsdk:"region"` + DeploymentTemplateId types.String `tfsdk:"deployment_template_id"` + Name types.String `tfsdk:"name"` + RequestId types.String `tfsdk:"request_id"` + ElasticsearchUsername types.String `tfsdk:"elasticsearch_username"` + ElasticsearchPassword types.String `tfsdk:"elasticsearch_password"` + ApmSecretToken types.String `tfsdk:"apm_secret_token"` + TrafficFilter types.Set `tfsdk:"traffic_filter"` + Tags types.Map `tfsdk:"tags"` + Elasticsearch types.Object `tfsdk:"elasticsearch"` + Kibana types.Object `tfsdk:"kibana"` + Apm types.Object `tfsdk:"apm"` + IntegrationsServer types.Object `tfsdk:"integrations_server"` + EnterpriseSearch types.Object `tfsdk:"enterprise_search"` + Observability types.Object `tfsdk:"observability"` +} + +func (dep DeploymentTF) CreateRequest(ctx context.Context, client *api.API) (*models.DeploymentCreateRequest, diag.Diagnostics) { + var result = models.DeploymentCreateRequest{ + Name: dep.Name.Value, + Alias: dep.Alias.Value, + Resources: &models.DeploymentCreateResources{}, + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{}, + } + + dtID := dep.DeploymentTemplateId.Value + version := dep.Version.Value + + var diagsnostics diag.Diagnostics + + template, err := deptemplateapi.Get(deptemplateapi.GetParams{ + API: client, + TemplateID: dtID, + Region: dep.Region.Value, + HideInstanceConfigurations: true, + }) + if err != nil { + diagsnostics.AddError("Deployment template get error", err.Error()) + return nil, diagsnostics + } + + useNodeRoles, err := utils.CompatibleWithNodeRoles(version) + if err != nil { + diagsnostics.AddError("Deployment parse error", err.Error()) + return nil, diagsnostics + } + + elasticsearchPayload, diags := 
elasticsearchv2.ElasticsearchPayload(ctx, dep.Elasticsearch, template, dtID, version, useNodeRoles, false) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if elasticsearchPayload != nil { + result.Resources.Elasticsearch = []*models.ElasticsearchPayload{elasticsearchPayload} + } + + kibanaPayload, diags := kibanav2.KibanaPayload(ctx, dep.Kibana, template) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if kibanaPayload != nil { + result.Resources.Kibana = []*models.KibanaPayload{kibanaPayload} + } + + apmPayload, diags := apmv2.ApmPayload(ctx, dep.Apm, template) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if apmPayload != nil { + result.Resources.Apm = []*models.ApmPayload{apmPayload} + } + + integrationsServerPayload, diags := integrationsserverv2.IntegrationsServerPayload(ctx, dep.IntegrationsServer, template) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if integrationsServerPayload != nil { + result.Resources.IntegrationsServer = []*models.IntegrationsServerPayload{integrationsServerPayload} + } + + enterpriseSearchPayload, diags := enterprisesearchv2.EnterpriseSearchesPayload(ctx, dep.EnterpriseSearch, template) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if enterpriseSearchPayload != nil { + result.Resources.EnterpriseSearch = []*models.EnterpriseSearchPayload{enterpriseSearchPayload} + } + + if diags := trafficFilterToModel(ctx, dep.TrafficFilter, &result); diags.HasError() { + diagsnostics.Append(diags...) + } + + observabilityPayload, diags := observabilityv2.ObservabilityPayload(ctx, dep.Observability, client) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + result.Settings.Observability = observabilityPayload + + result.Metadata.Tags, diags = converters.TypesMapToModelsTags(ctx, dep.Tags) + + if diags.HasError() { + diagsnostics.Append(diags...) 
+ } + + return &result, diagsnostics +} + +// trafficFilterToModel expands the flattened "traffic_filter" settings to a DeploymentCreateRequest. +func trafficFilterToModel(ctx context.Context, set types.Set, req *models.DeploymentCreateRequest) diag.Diagnostics { + if len(set.Elems) == 0 || req == nil { + return nil + } + + if req.Settings == nil { + req.Settings = &models.DeploymentCreateSettings{} + } + + if req.Settings.TrafficFilterSettings == nil { + req.Settings.TrafficFilterSettings = &models.TrafficFilterSettings{} + } + + var rulesets []string + if diags := tfsdk.ValueAs(ctx, set, &rulesets); diags.HasError() { + return diags + } + + req.Settings.TrafficFilterSettings.Rulesets = append( + req.Settings.TrafficFilterSettings.Rulesets, + rulesets..., + ) + + return nil +} + +func HandleRemoteClusters(ctx context.Context, client *api.API, deploymentId string, esObj types.Object) diag.Diagnostics { + remoteClusters, refId, diags := elasticsearchRemoteClustersPayload(ctx, client, deploymentId, esObj) + + if diags.HasError() { + return diags + } + + if err := esremoteclustersapi.Update(esremoteclustersapi.UpdateParams{ + API: client, + DeploymentID: deploymentId, + RefID: refId, + RemoteResources: remoteClusters, + }); err != nil { + diags.AddError("cannot update remote clusters", err.Error()) + return diags + } + + return nil +} + +func elasticsearchRemoteClustersPayload(ctx context.Context, client *api.API, deploymentId string, esObj types.Object) (*models.RemoteResources, string, diag.Diagnostics) { + var es *elasticsearchv2.ElasticsearchTF + + diags := tfsdk.ValueAs(ctx, esObj, &es) + + if diags.HasError() { + return nil, "", diags + } + + if es == nil { + var diags diag.Diagnostics + diags.AddError("failed create remote clusters payload", "there is no elasticsearch") + return nil, "", diags + } + + remoteRes, diags := elasticsearchv2.ElasticsearchRemoteClustersPayload(ctx, es.RemoteCluster) + if diags.HasError() { + return nil, "", diags + } + + return 
remoteRes, es.RefId.Value, nil +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_parse_credentials_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_parse_credentials_test.go index 858e6e4d7..e7d197889 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_parse_credentials_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_parse_credentials_test.go @@ -34,7 +34,6 @@ func Test_parseCredentials(t *testing.T) { name string args args want Deployment - err error }{ { name: "Parses credentials", @@ -75,13 +74,8 @@ func Test_parseCredentials(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := tt.args.dep.parseCredentials(tt.args.resources) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - assert.Equal(t, tt.want, tt.args.dep) - } + tt.args.dep.parseCredentials(tt.args.resources) + assert.Equal(t, tt.want, tt.args.dep) }) } } diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go new file mode 100644 index 000000000..0a46ca900 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + "fmt" + + "github.com/elastic/cloud-sdk-go/pkg/models" + + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + integrationsserverv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type Deployment struct { + Id string `tfsdk:"id"` + Alias string `tfsdk:"alias"` + Version string `tfsdk:"version"` + Region string `tfsdk:"region"` + DeploymentTemplateId string `tfsdk:"deployment_template_id"` + Name string `tfsdk:"name"` + RequestId string `tfsdk:"request_id"` + ElasticsearchUsername string `tfsdk:"elasticsearch_username"` + ElasticsearchPassword string `tfsdk:"elasticsearch_password"` + ApmSecretToken *string `tfsdk:"apm_secret_token"` + TrafficFilter []string `tfsdk:"traffic_filter"` + Tags map[string]string `tfsdk:"tags"` + Elasticsearch *elasticsearchv2.Elasticsearch `tfsdk:"elasticsearch"` + Kibana *kibanav2.Kibana `tfsdk:"kibana"` + Apm *apmv2.Apm `tfsdk:"apm"` + IntegrationsServer *integrationsserverv2.IntegrationsServer `tfsdk:"integrations_server"` + EnterpriseSearch *enterprisesearchv2.EnterpriseSearch `tfsdk:"enterprise_search"` + Observability *observabilityv2.Observability 
`tfsdk:"observability"` +} + +// Nullify Elasticsearch topologies that have zero size and are not specified in plan +func (dep *Deployment) NullifyUnusedEsTopologies(ctx context.Context, esPlan *elasticsearchv2.ElasticsearchTF) { + if dep.Elasticsearch == nil { + return + } + + if esPlan == nil { + return + } + + dep.Elasticsearch.HotTier = nullifyUnspecifiedZeroSizedTier(esPlan.HotContentTier, dep.Elasticsearch.HotTier) + + dep.Elasticsearch.WarmTier = nullifyUnspecifiedZeroSizedTier(esPlan.WarmTier, dep.Elasticsearch.WarmTier) + + dep.Elasticsearch.ColdTier = nullifyUnspecifiedZeroSizedTier(esPlan.ColdTier, dep.Elasticsearch.ColdTier) + + dep.Elasticsearch.FrozenTier = nullifyUnspecifiedZeroSizedTier(esPlan.FrozenTier, dep.Elasticsearch.FrozenTier) + + dep.Elasticsearch.MlTier = nullifyUnspecifiedZeroSizedTier(esPlan.MlTier, dep.Elasticsearch.MlTier) + + dep.Elasticsearch.MasterTier = nullifyUnspecifiedZeroSizedTier(esPlan.MasterTier, dep.Elasticsearch.MasterTier) + + dep.Elasticsearch.CoordinatingTier = nullifyUnspecifiedZeroSizedTier(esPlan.CoordinatingTier, dep.Elasticsearch.CoordinatingTier) +} + +func nullifyUnspecifiedZeroSizedTier(tierPlan types.Object, tier *elasticsearchv2.ElasticsearchTopology) *elasticsearchv2.ElasticsearchTopology { + + if tierPlan.IsNull() && tier != nil { + + size, err := converters.ParseTopologySize(tier.Size, tier.SizeResource) + + // we can ignore returning an error here - it's handled in readers + if err == nil && size != nil && size.Value != nil && *size.Value == 0 { + tier = nil + } + } + + return tier +} + +func ReadDeployment(res *models.DeploymentGetResponse, remotes *models.RemoteResources, deploymentResources []*models.DeploymentResource) (*Deployment, error) { + var dep Deployment + + if res.ID == nil { + return nil, utils.MissingField("ID") + } + dep.Id = *res.ID + + dep.Alias = res.Alias + + if res.Name == nil { + return nil, utils.MissingField("Name") + } + dep.Name = *res.Name + + if res.Metadata != nil { + dep.Tags 
= converters.ModelsTagsToMap(res.Metadata.Tags) + } + + if res.Resources == nil { + return nil, nil + } + + templateID, err := utils.GetDeploymentTemplateID(res.Resources) + if err != nil { + return nil, err + } + + dep.DeploymentTemplateId = templateID + + dep.Region = utils.GetRegion(res.Resources) + + // We're reconciling the version and storing the lowest version of any + // of the deployment resources. This ensures that if an upgrade fails, + // the state version will be lower than the desired version, making + // retries possible. Once more resource types are added, the function + // needs to be modified to check those as well. + version, err := utils.GetLowestVersion(res.Resources) + if err != nil { + // This code path is highly unlikely, but we're bubbling up the + // error in case one of the versions isn't parseable by semver. + return nil, fmt.Errorf("failed reading deployment: %w", err) + } + dep.Version = version + + dep.Elasticsearch, err = elasticsearchv2.ReadElasticsearches(res.Resources.Elasticsearch, remotes) + if err != nil { + return nil, err + } + + if dep.Kibana, err = kibanav2.ReadKibanas(res.Resources.Kibana); err != nil { + return nil, err + } + + if dep.Apm, err = apmv2.ReadApms(res.Resources.Apm); err != nil { + return nil, err + } + + if dep.IntegrationsServer, err = integrationsserverv2.ReadIntegrationsServers(res.Resources.IntegrationsServer); err != nil { + return nil, err + } + + if dep.EnterpriseSearch, err = enterprisesearchv2.ReadEnterpriseSearches(res.Resources.EnterpriseSearch); err != nil { + return nil, err + } + + if dep.TrafficFilter, err = readTrafficFilters(res.Settings); err != nil { + return nil, err + } + + if dep.Observability, err = observabilityv2.ReadObservability(res.Settings); err != nil { + return nil, err + } + + dep.parseCredentials(deploymentResources) + + return &dep, nil +} + +func readTrafficFilters(in *models.DeploymentSettings) ([]string, error) { + if in == nil || in.TrafficFilterSettings == nil || 
len(in.TrafficFilterSettings.Rulesets) == 0 { + return nil, nil + } + + var rules []string + + return append(rules, in.TrafficFilterSettings.Rulesets...), nil +} + +// parseCredentials parses the Create or Update response Resources populating +// credential settings in the Terraform state if the keys are found, currently +// populates the following credentials in plain text: +// * Elasticsearch username and Password +func (dep *Deployment) parseCredentials(resources []*models.DeploymentResource) { + for _, res := range resources { + + if creds := res.Credentials; creds != nil { + if creds.Username != nil && *creds.Username != "" { + dep.ElasticsearchUsername = *creds.Username + } + + if creds.Password != nil && *creds.Password != "" { + dep.ElasticsearchPassword = *creds.Password + } + } + + if res.SecretToken != "" { + dep.ApmSecretToken = &res.SecretToken + } + } +} + +func (dep *Deployment) ProcessSelfInObservability() { + + if dep.Observability == nil { + return + } + + if dep.Observability.DeploymentId == nil { + return + } + + if *dep.Observability.DeploymentId == dep.Id { + *dep.Observability.DeploymentId = "self" + } +} + +func (dep *Deployment) SetCredentialsIfEmpty(state *DeploymentTF) { + if state == nil { + return + } + + if dep.ElasticsearchPassword == "" && state.ElasticsearchPassword.Value != "" { + dep.ElasticsearchPassword = state.ElasticsearchPassword.Value + } + + if dep.ElasticsearchUsername == "" && state.ElasticsearchUsername.Value != "" { + dep.ElasticsearchUsername = state.ElasticsearchUsername.Value + } + + if (dep.ApmSecretToken == nil || *dep.ApmSecretToken == "") && state.ApmSecretToken.Value != "" { + dep.ApmSecretToken = &state.ApmSecretToken.Value + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload.go new file mode 100644 index 000000000..5a0501481 --- /dev/null +++ 
b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload.go @@ -0,0 +1,158 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deptemplateapi" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + integrationsserverv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + 
"github.com/hashicorp/terraform-plugin-framework/diag" +) + +func (plan DeploymentTF) UpdateRequest(ctx context.Context, client *api.API, state DeploymentTF) (*models.DeploymentUpdateRequest, diag.Diagnostics) { + var result = models.DeploymentUpdateRequest{ + Name: plan.Name.Value, + Alias: plan.Alias.Value, + PruneOrphans: ec.Bool(true), + Resources: &models.DeploymentUpdateResources{}, + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{}, + } + + dtID := plan.DeploymentTemplateId.Value + + var diagsnostics diag.Diagnostics + + template, err := deptemplateapi.Get(deptemplateapi.GetParams{ + API: client, + TemplateID: dtID, + Region: plan.Region.Value, + HideInstanceConfigurations: true, + }) + if err != nil { + diagsnostics.AddError("Deployment template get error", err.Error()) + return nil, diagsnostics + } + + // When the deployment template is changed, we need to skip the missing + // resource topologies to account for a new instance_configuration_id and + // a different default value. + skipEStopologies := plan.DeploymentTemplateId.Value != "" && plan.DeploymentTemplateId.Value != state.DeploymentTemplateId.Value && state.DeploymentTemplateId.Value != "" + // If the deployment_template_id is changed, then we skip updating the + // Elasticsearch topology to account for the case where the + // instance_configuration_id changes, i.e. Hot / Warm, etc. + // This might not be necessary going forward as we move to + // tiered Elasticsearch nodes. + + useNodeRoles, diags := utils.UseNodeRoles(state.Version, plan.Version) + + if diags.HasError() { + return nil, diags + } + + elasticsearchPayload, diags := elasticsearchv2.ElasticsearchPayload(ctx, plan.Elasticsearch, template, dtID, plan.Version.Value, useNodeRoles, skipEStopologies) + + if diags.HasError() { + diagsnostics.Append(diags...) 
+ } + + if elasticsearchPayload != nil { + // if the restore snapshot operation has been specified, the snapshot restore + // can't be full once the cluster has been created, so the Strategy must be set + // to "partial". + ensurePartialSnapshotStrategy(elasticsearchPayload) + + result.Resources.Elasticsearch = append(result.Resources.Elasticsearch, elasticsearchPayload) + } + + kibanaPayload, diags := kibanav2.KibanaPayload(ctx, plan.Kibana, template) + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if kibanaPayload != nil { + result.Resources.Kibana = append(result.Resources.Kibana, kibanaPayload) + } + + apmPayload, diags := apmv2.ApmPayload(ctx, plan.Apm, template) + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if apmPayload != nil { + result.Resources.Apm = append(result.Resources.Apm, apmPayload) + } + + integrationsServerPayload, diags := integrationsserverv2.IntegrationsServerPayload(ctx, plan.IntegrationsServer, template) + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if integrationsServerPayload != nil { + result.Resources.IntegrationsServer = append(result.Resources.IntegrationsServer, integrationsServerPayload) + } + + enterpriseSearchPayload, diags := enterprisesearchv2.EnterpriseSearchesPayload(ctx, plan.EnterpriseSearch, template) + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if enterpriseSearchPayload != nil { + result.Resources.EnterpriseSearch = append(result.Resources.EnterpriseSearch, enterpriseSearchPayload) + } + + observabilityPayload, diags := observabilityv2.ObservabilityPayload(ctx, plan.Observability, client) + if diags.HasError() { + diagsnostics.Append(diags...) + } + result.Settings.Observability = observabilityPayload + + // In order to stop shipping logs and metrics, an empty Observability + // object must be passed, as opposed to a nil object when creating a + // deployment without observability settings. 
+ if plan.Observability.IsNull() && !state.Observability.IsNull() { + result.Settings.Observability = &models.DeploymentObservabilitySettings{} + } + + result.Metadata.Tags, diags = converters.TypesMapToModelsTags(ctx, plan.Tags) + if diags.HasError() { + diagsnostics.Append(diags...) + } + + return &result, diagsnostics +} + +func ensurePartialSnapshotStrategy(es *models.ElasticsearchPayload) { + transient := es.Plan.Transient + if transient == nil || transient.RestoreSnapshot == nil { + return + } + transient.RestoreSnapshot.Strategy = "partial" +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/traffic_filter_test.go b/ec/ecresource/deploymentresource/deployment/v2/traffic_filter_test.go index 2bf4b9564..59bc5fce8 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/traffic_filter_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/traffic_filter_test.go @@ -77,7 +77,7 @@ func TestParseTrafficFiltering(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := ReadTrafficFilters(tt.args.settings) + got, err := readTrafficFilters(tt.args.settings) assert.Nil(t, err) assert.Equal(t, tt.want, got) }) @@ -153,7 +153,7 @@ func Test_trafficFilterToModel(t *testing.T) { diags := tfsdk.ValueFrom(context.Background(), tt.args.filters, types.SetType{ElemType: types.StringType}, &filters) assert.Nil(t, diags) - diags = TrafficFilterToModel(context.Background(), filters, tt.args.req) + diags = trafficFilterToModel(context.Background(), filters, tt.args.req) assert.Nil(t, diags) assert.Equal(t, tt.want, tt.args.req) }) diff --git a/ec/ecresource/deploymentresource/read.go b/ec/ecresource/deploymentresource/read.go index bf012ae47..9714bfb14 100644 --- a/ec/ecresource/deploymentresource/read.go +++ b/ec/ecresource/deploymentresource/read.go @@ -143,7 +143,7 @@ func (r *Resource) read(ctx context.Context, id string, state *deploymentv2.Depl deployment.ProcessSelfInObservability() - 
deployment.NullifyNotUsedEsTopologies(ctx, elasticsearchPlan) + deployment.NullifyUnusedEsTopologies(ctx, elasticsearchPlan) // ReadDeployment returns empty config struct if there is no config, so we have to nullify it if plan doesn't contain it // we use state for plan in Read and there is no state during import so we need to check elasticsearchPlan against nil From d992a89c44b66f8f2b97722b24b97c7697f6aacc Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 22 Dec 2022 14:23:05 +0100 Subject: [PATCH 040/104] renaming and removing obsolete code --- ec/ecresource/deploymentresource/apm/v1/schema.go | 5 ++--- ec/ecresource/deploymentresource/apm/v2/apm_read.go | 2 +- ec/ecresource/deploymentresource/apm/v2/apm_topology.go | 6 +++--- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/ec/ecresource/deploymentresource/apm/v1/schema.go b/ec/ecresource/deploymentresource/apm/v1/schema.go index c382bb7c6..e5882f2df 100644 --- a/ec/ecresource/deploymentresource/apm/v1/schema.go +++ b/ec/ecresource/deploymentresource/apm/v1/schema.go @@ -27,9 +27,8 @@ import ( func ApmTopologySchema() tfsdk.Attribute { return tfsdk.Attribute{ - Description: "Optional topology attribute", - Optional: true, - Computed: true, + Optional: true, + Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ resource.UseStateForUnknown(), }, diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_read.go b/ec/ecresource/deploymentresource/apm/v2/apm_read.go index d7750464f..42c3a550c 100644 --- a/ec/ecresource/deploymentresource/apm/v2/apm_read.go +++ b/ec/ecresource/deploymentresource/apm/v2/apm_read.go @@ -63,7 +63,7 @@ func ReadApm(in *models.ApmResourceInfo) (*Apm, error) { apm.Region = in.Region plan := in.Info.PlanInfo.Current.Plan - topologies, err := ReadApmTopologies(plan.ClusterTopology) + topologies, err := readApmTopologies(plan.ClusterTopology) if err != nil { return nil, err } diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_topology.go 
b/ec/ecresource/deploymentresource/apm/v2/apm_topology.go index 35dde36ed..5543ef20f 100644 --- a/ec/ecresource/deploymentresource/apm/v2/apm_topology.go +++ b/ec/ecresource/deploymentresource/apm/v2/apm_topology.go @@ -34,7 +34,7 @@ const ( minimumApmSize = 512 ) -func ReadApmTopology(in *models.ApmTopologyElement) (*v1.Topology, error) { +func readApmTopology(in *models.ApmTopologyElement) (*v1.Topology, error) { var top v1.Topology if in.InstanceConfigurationID != "" { @@ -51,7 +51,7 @@ func ReadApmTopology(in *models.ApmTopologyElement) (*v1.Topology, error) { return &top, nil } -func ReadApmTopologies(in []*models.ApmTopologyElement) (v1.Topologies, error) { +func readApmTopologies(in []*models.ApmTopologyElement) (v1.Topologies, error) { topologies := make([]v1.Topology, 0, len(in)) for _, model := range in { @@ -59,7 +59,7 @@ func ReadApmTopologies(in []*models.ApmTopologyElement) (v1.Topologies, error) { continue } - topology, err := ReadApmTopology(model) + topology, err := readApmTopology(model) if err != nil { return nil, nil } From 8cf3c5440e36cbc4ea138a6559dda3995dbf0d2b Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 22 Dec 2022 14:38:58 +0100 Subject: [PATCH 041/104] split elasticsearch to read and payload files --- ...sticsearch.go => elasticsearch_payload.go} | 136 --------------- .../elasticsearch/v2/elasticsearch_read.go | 162 ++++++++++++++++++ 2 files changed, 162 insertions(+), 136 deletions(-) rename ec/ecresource/deploymentresource/elasticsearch/v2/{elasticsearch.go => elasticsearch_payload.go} (68%) create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go similarity index 68% rename from ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch.go rename to ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go 
index 977a1a225..e4472db91 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go @@ -23,14 +23,11 @@ import ( "strings" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" - "github.com/elastic/terraform-provider-ec/ec/internal/converters" - "github.com/elastic/terraform-provider-ec/ec/internal/util" ) type ElasticsearchTF struct { @@ -57,30 +54,6 @@ type ElasticsearchTF struct { Strategy types.String `tfsdk:"strategy"` } -type Elasticsearch struct { - Autoscale *string `tfsdk:"autoscale"` - RefId *string `tfsdk:"ref_id"` - ResourceId *string `tfsdk:"resource_id"` - Region *string `tfsdk:"region"` - CloudID *string `tfsdk:"cloud_id"` - HttpEndpoint *string `tfsdk:"http_endpoint"` - HttpsEndpoint *string `tfsdk:"https_endpoint"` - HotTier *ElasticsearchTopology `tfsdk:"hot"` - CoordinatingTier *ElasticsearchTopology `tfsdk:"coordinating"` - MasterTier *ElasticsearchTopology `tfsdk:"master"` - WarmTier *ElasticsearchTopology `tfsdk:"warm"` - ColdTier *ElasticsearchTopology `tfsdk:"cold"` - FrozenTier *ElasticsearchTopology `tfsdk:"frozen"` - MlTier *ElasticsearchTopology `tfsdk:"ml"` - Config *ElasticsearchConfig `tfsdk:"config"` - RemoteCluster ElasticsearchRemoteClusters `tfsdk:"remote_cluster"` - SnapshotSource *ElasticsearchSnapshotSource `tfsdk:"snapshot_source"` - Extension ElasticsearchExtensions `tfsdk:"extension"` - TrustAccount ElasticsearchTrustAccounts `tfsdk:"trust_account"` - TrustExternal ElasticsearchTrustExternals `tfsdk:"trust_external"` - Strategy *string `tfsdk:"strategy"` -} - func ElasticsearchPayload(ctx context.Context, esObj types.Object, template 
*models.DeploymentTemplateInfoV2, dtID, version string, useNodeRoles bool, skipTopologies bool) (*models.ElasticsearchPayload, diag.Diagnostics) { var es *ElasticsearchTF @@ -112,91 +85,6 @@ func ElasticsearchPayload(ctx context.Context, esObj types.Object, template *mod return payload, nil } -func ReadElasticsearches(in []*models.ElasticsearchResourceInfo, remotes *models.RemoteResources) (*Elasticsearch, error) { - for _, model := range in { - if util.IsCurrentEsPlanEmpty(model) || utils.IsEsResourceStopped(model) { - continue - } - es, err := ReadElasticsearch(model, remotes) - if err != nil { - return nil, err - } - return es, nil - } - - return nil, nil -} - -func ReadElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.RemoteResources) (*Elasticsearch, error) { - var es Elasticsearch - - if util.IsCurrentEsPlanEmpty(in) || utils.IsEsResourceStopped(in) { - return &es, nil - } - - if in.Info.ClusterID != nil && *in.Info.ClusterID != "" { - es.ResourceId = in.Info.ClusterID - } - - if in.RefID != nil && *in.RefID != "" { - es.RefId = in.RefID - } - - if in.Region != nil { - es.Region = in.Region - } - - plan := in.Info.PlanInfo.Current.Plan - var err error - - topologies, err := ReadElasticsearchTopologies(plan) - if err != nil { - return nil, err - } - es.setTopology(topologies) - - if plan.AutoscalingEnabled != nil { - es.Autoscale = ec.String(strconv.FormatBool(*plan.AutoscalingEnabled)) - } - - if meta := in.Info.Metadata; meta != nil && meta.CloudID != "" { - es.CloudID = &meta.CloudID - } - - es.HttpEndpoint, es.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) - - es.Config, err = ReadElasticsearchConfig(plan.Elasticsearch) - if err != nil { - return nil, err - } - - clusters, err := ReadElasticsearchRemoteClusters(remotes.Resources) - if err != nil { - return nil, err - } - es.RemoteCluster = clusters - - extensions, err := ReadElasticsearchExtensions(plan.Elasticsearch) - if err != nil { - return nil, err - } - es.Extension = 
extensions - - accounts, err := ReadElasticsearchTrustAccounts(in.Info.Settings) - if err != nil { - return nil, err - } - es.TrustAccount = accounts - - externals, err := ReadElasticsearchTrustExternals(in.Info.Settings) - if err != nil { - return nil, err - } - es.TrustExternal = externals - - return &es, nil -} - func (es *ElasticsearchTF) Payload(ctx context.Context, res *models.ElasticsearchPayload, skipTopologies bool) (*models.ElasticsearchPayload, diag.Diagnostics) { var diags diag.Diagnostics @@ -280,30 +168,6 @@ func topologyPayload(ctx context.Context, topologyObj types.Object, id string, t return diags } -func (es *Elasticsearch) setTopology(topologies ElasticsearchTopologies) { - set := topologies.Set() - - for id, topology := range set { - topology := topology - switch id { - case "hot_content": - es.HotTier = &topology - case "coordinating": - es.CoordinatingTier = &topology - case "master": - es.MasterTier = &topology - case "warm": - es.WarmTier = &topology - case "cold": - es.ColdTier = &topology - case "frozen": - es.FrozenTier = &topology - case "ml": - es.MlTier = &topology - } - } -} - func unsetElasticsearchCuration(payload *models.ElasticsearchPayload) { if payload.Plan.Elasticsearch != nil { payload.Plan.Elasticsearch.Curation = nil diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go new file mode 100644 index 000000000..659d6c55e --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go @@ -0,0 +1,162 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "strconv" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" +) + +type Elasticsearch struct { + Autoscale *string `tfsdk:"autoscale"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + CloudID *string `tfsdk:"cloud_id"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + HotTier *ElasticsearchTopology `tfsdk:"hot"` + CoordinatingTier *ElasticsearchTopology `tfsdk:"coordinating"` + MasterTier *ElasticsearchTopology `tfsdk:"master"` + WarmTier *ElasticsearchTopology `tfsdk:"warm"` + ColdTier *ElasticsearchTopology `tfsdk:"cold"` + FrozenTier *ElasticsearchTopology `tfsdk:"frozen"` + MlTier *ElasticsearchTopology `tfsdk:"ml"` + Config *ElasticsearchConfig `tfsdk:"config"` + RemoteCluster ElasticsearchRemoteClusters `tfsdk:"remote_cluster"` + SnapshotSource *ElasticsearchSnapshotSource `tfsdk:"snapshot_source"` + Extension ElasticsearchExtensions `tfsdk:"extension"` + TrustAccount ElasticsearchTrustAccounts `tfsdk:"trust_account"` + TrustExternal ElasticsearchTrustExternals `tfsdk:"trust_external"` + Strategy *string `tfsdk:"strategy"` +} + +func ReadElasticsearches(in []*models.ElasticsearchResourceInfo, remotes *models.RemoteResources) (*Elasticsearch, 
error) { + for _, model := range in { + if util.IsCurrentEsPlanEmpty(model) || utils.IsEsResourceStopped(model) { + continue + } + es, err := ReadElasticsearch(model, remotes) + if err != nil { + return nil, err + } + return es, nil + } + + return nil, nil +} + +func ReadElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.RemoteResources) (*Elasticsearch, error) { + var es Elasticsearch + + if util.IsCurrentEsPlanEmpty(in) || utils.IsEsResourceStopped(in) { + return &es, nil + } + + if in.Info.ClusterID != nil && *in.Info.ClusterID != "" { + es.ResourceId = in.Info.ClusterID + } + + if in.RefID != nil && *in.RefID != "" { + es.RefId = in.RefID + } + + if in.Region != nil { + es.Region = in.Region + } + + plan := in.Info.PlanInfo.Current.Plan + var err error + + topologies, err := ReadElasticsearchTopologies(plan) + if err != nil { + return nil, err + } + es.setTopology(topologies) + + if plan.AutoscalingEnabled != nil { + es.Autoscale = ec.String(strconv.FormatBool(*plan.AutoscalingEnabled)) + } + + if meta := in.Info.Metadata; meta != nil && meta.CloudID != "" { + es.CloudID = &meta.CloudID + } + + es.HttpEndpoint, es.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) + + es.Config, err = ReadElasticsearchConfig(plan.Elasticsearch) + if err != nil { + return nil, err + } + + clusters, err := ReadElasticsearchRemoteClusters(remotes.Resources) + if err != nil { + return nil, err + } + es.RemoteCluster = clusters + + extensions, err := ReadElasticsearchExtensions(plan.Elasticsearch) + if err != nil { + return nil, err + } + es.Extension = extensions + + accounts, err := ReadElasticsearchTrustAccounts(in.Info.Settings) + if err != nil { + return nil, err + } + es.TrustAccount = accounts + + externals, err := ReadElasticsearchTrustExternals(in.Info.Settings) + if err != nil { + return nil, err + } + es.TrustExternal = externals + + return &es, nil +} + +func (es *Elasticsearch) setTopology(topologies ElasticsearchTopologies) { + set := 
topologies.Set() + + for id, topology := range set { + topology := topology + switch id { + case "hot_content": + es.HotTier = &topology + case "coordinating": + es.CoordinatingTier = &topology + case "master": + es.MasterTier = &topology + case "warm": + es.WarmTier = &topology + case "cold": + es.ColdTier = &topology + case "frozen": + es.FrozenTier = &topology + case "ml": + es.MlTier = &topology + } + } +} From c8ad85ec5910ed94fb1e421fbed8cadc39443404 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 22 Dec 2022 14:42:38 +0100 Subject: [PATCH 042/104] split enterprise_search files to read and payload --- .../elasticsearch/v2/elasticsearch_payload.go | 4 +- ...search.go => enterprise_search_payload.go} | 83 -------------- .../v2/enterprise_search_read.go | 105 ++++++++++++++++++ 3 files changed, 107 insertions(+), 85 deletions(-) rename ec/ecresource/deploymentresource/enterprisesearch/v2/{enterprise_search.go => enterprise_search_payload.go} (62%) create mode 100644 ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read.go diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go index e4472db91..a9fbddb3b 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go @@ -108,7 +108,7 @@ func (es *ElasticsearchTF) Payload(ctx context.Context, res *models.Elasticsearc // Fixes the node_roles field to remove the dedicated tier roles from the // list when these are set as a dedicated tier as a topology element. - UpdateNodeRolesOnDedicatedTiers(res.Plan.ClusterTopology) + updateNodeRolesOnDedicatedTiers(res.Plan.ClusterTopology) res.Plan.Elasticsearch, ds = ElasticsearchConfigPayload(ctx, es.Config, res.Plan.Elasticsearch) diags.Append(ds...) 
@@ -178,7 +178,7 @@ func unsetElasticsearchCuration(payload *models.ElasticsearchPayload) { } } -func UpdateNodeRolesOnDedicatedTiers(topologies []*models.ElasticsearchClusterTopologyElement) { +func updateNodeRolesOnDedicatedTiers(topologies []*models.ElasticsearchClusterTopologyElement) { dataTier, hasMasterTier, hasIngestTier := dedicatedTopoogies(topologies) // This case is not very likely since all deployments will have a data tier. // It's here because the code path is technically possible and it's better diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload.go similarity index 62% rename from ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search.go rename to ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload.go index 3470d3509..d8e117db3 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload.go @@ -22,9 +22,6 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v1" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" - "github.com/elastic/terraform-provider-ec/ec/internal/converters" - "github.com/elastic/terraform-provider-ec/ec/internal/util" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" @@ -47,69 +44,6 @@ type EnterpriseSearchTF struct { Config types.Object `tfsdk:"config"` } -type EnterpriseSearch struct { - ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` - RefId *string `tfsdk:"ref_id"` - ResourceId *string `tfsdk:"resource_id"` - Region *string `tfsdk:"region"` - HttpEndpoint *string `tfsdk:"http_endpoint"` - 
HttpsEndpoint *string `tfsdk:"https_endpoint"` - InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` - Size *string `tfsdk:"size"` - SizeResource *string `tfsdk:"size_resource"` - ZoneCount int `tfsdk:"zone_count"` - NodeTypeAppserver *bool `tfsdk:"node_type_appserver"` - NodeTypeConnector *bool `tfsdk:"node_type_connector"` - NodeTypeWorker *bool `tfsdk:"node_type_worker"` - Config *EnterpriseSearchConfig `tfsdk:"config"` -} - -type EnterpriseSearches []EnterpriseSearch - -func ReadEnterpriseSearch(in *models.EnterpriseSearchResourceInfo) (*EnterpriseSearch, error) { - if util.IsCurrentEssPlanEmpty(in) || utils.IsEssResourceStopped(in) { - return nil, nil - } - - var ess EnterpriseSearch - - ess.RefId = in.RefID - - ess.ResourceId = in.Info.ID - - ess.Region = in.Region - - plan := in.Info.PlanInfo.Current.Plan - - topologies, err := ReadEnterpriseSearchTopologies(plan.ClusterTopology) - - if err != nil { - return nil, err - } - - if len(topologies) > 0 { - ess.InstanceConfigurationId = topologies[0].InstanceConfigurationId - ess.Size = topologies[0].Size - ess.SizeResource = topologies[0].SizeResource - ess.ZoneCount = topologies[0].ZoneCount - ess.NodeTypeAppserver = topologies[0].NodeTypeAppserver - ess.NodeTypeConnector = topologies[0].NodeTypeConnector - ess.NodeTypeWorker = topologies[0].NodeTypeWorker - } - - ess.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID - - ess.HttpEndpoint, ess.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) - - cfg, err := readEnterpriseSearchConfig(plan.EnterpriseSearch) - if err != nil { - return nil, err - } - ess.Config = cfg - - return &ess, nil -} - func (es *EnterpriseSearchTF) Payload(ctx context.Context, payload models.EnterpriseSearchPayload) (*models.EnterpriseSearchPayload, diag.Diagnostics) { var diags diag.Diagnostics @@ -158,23 +92,6 @@ func (es *EnterpriseSearchTF) Payload(ctx context.Context, payload models.Enterp return &payload, diags } -func ReadEnterpriseSearches(in 
[]*models.EnterpriseSearchResourceInfo) (*EnterpriseSearch, error) { - for _, model := range in { - if util.IsCurrentEssPlanEmpty(model) || utils.IsEssResourceStopped(model) { - continue - } - - es, err := ReadEnterpriseSearch(model) - if err != nil { - return nil, err - } - - return es, nil - } - - return nil, nil -} - func EnterpriseSearchesPayload(ctx context.Context, esObj types.Object, template *models.DeploymentTemplateInfoV2) (*models.EnterpriseSearchPayload, diag.Diagnostics) { var diags diag.Diagnostics diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read.go new file mode 100644 index 000000000..998c2a52c --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read.go @@ -0,0 +1,105 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" +) + +type EnterpriseSearch struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + NodeTypeAppserver *bool `tfsdk:"node_type_appserver"` + NodeTypeConnector *bool `tfsdk:"node_type_connector"` + NodeTypeWorker *bool `tfsdk:"node_type_worker"` + Config *EnterpriseSearchConfig `tfsdk:"config"` +} + +type EnterpriseSearches []EnterpriseSearch + +func ReadEnterpriseSearch(in *models.EnterpriseSearchResourceInfo) (*EnterpriseSearch, error) { + if util.IsCurrentEssPlanEmpty(in) || utils.IsEssResourceStopped(in) { + return nil, nil + } + + var ess EnterpriseSearch + + ess.RefId = in.RefID + + ess.ResourceId = in.Info.ID + + ess.Region = in.Region + + plan := in.Info.PlanInfo.Current.Plan + + topologies, err := ReadEnterpriseSearchTopologies(plan.ClusterTopology) + + if err != nil { + return nil, err + } + + if len(topologies) > 0 { + ess.InstanceConfigurationId = topologies[0].InstanceConfigurationId + ess.Size = topologies[0].Size + ess.SizeResource = topologies[0].SizeResource + ess.ZoneCount = topologies[0].ZoneCount + ess.NodeTypeAppserver = topologies[0].NodeTypeAppserver + ess.NodeTypeConnector = topologies[0].NodeTypeConnector + ess.NodeTypeWorker = topologies[0].NodeTypeWorker + } + + ess.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID + + ess.HttpEndpoint, 
ess.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) + + cfg, err := readEnterpriseSearchConfig(plan.EnterpriseSearch) + if err != nil { + return nil, err + } + ess.Config = cfg + + return &ess, nil +} + +func ReadEnterpriseSearches(in []*models.EnterpriseSearchResourceInfo) (*EnterpriseSearch, error) { + for _, model := range in { + if util.IsCurrentEssPlanEmpty(model) || utils.IsEssResourceStopped(model) { + continue + } + + es, err := ReadEnterpriseSearch(model) + if err != nil { + return nil, err + } + + return es, nil + } + + return nil, nil +} From f323cf18b6aed268d8ebe351f3c63a1456b94b6c Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 22 Dec 2022 14:49:53 +0100 Subject: [PATCH 043/104] split integrations_server files to read and payload --- ...rver.go => integrations_server_payload.go} | 80 +--------------- .../v2/integrations_server_read.go | 96 +++++++++++++++++++ 2 files changed, 99 insertions(+), 77 deletions(-) rename ec/ecresource/deploymentresource/integrationsserver/v2/{integrations_server.go => integrations_server_payload.go} (60%) create mode 100644 ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read.go diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload.go similarity index 60% rename from ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server.go rename to ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload.go index 56f291e25..b3f125551 100644 --- a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server.go +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload.go @@ -22,9 +22,6 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" - 
"github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" - "github.com/elastic/terraform-provider-ec/ec/internal/converters" - "github.com/elastic/terraform-provider-ec/ec/internal/util" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" @@ -44,77 +41,6 @@ type IntegrationsServerTF struct { Config types.Object `tfsdk:"config"` } -type IntegrationsServer struct { - ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` - RefId *string `tfsdk:"ref_id"` - ResourceId *string `tfsdk:"resource_id"` - Region *string `tfsdk:"region"` - HttpEndpoint *string `tfsdk:"http_endpoint"` - HttpsEndpoint *string `tfsdk:"https_endpoint"` - InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` - Size *string `tfsdk:"size"` - SizeResource *string `tfsdk:"size_resource"` - ZoneCount int `tfsdk:"zone_count"` - Config *IntegrationsServerConfig `tfsdk:"config"` -} - -func ReadIntegrationsServers(in []*models.IntegrationsServerResourceInfo) (*IntegrationsServer, error) { - for _, model := range in { - if util.IsCurrentIntegrationsServerPlanEmpty(model) || utils.IsIntegrationsServerResourceStopped(model) { - continue - } - - srv, err := readIntegrationsServer(model) - if err != nil { - return nil, err - } - - return srv, nil - } - - return nil, nil -} - -func readIntegrationsServer(in *models.IntegrationsServerResourceInfo) (*IntegrationsServer, error) { - - var srv IntegrationsServer - - srv.RefId = in.RefID - - srv.ResourceId = in.Info.ID - - srv.Region = in.Region - - plan := in.Info.PlanInfo.Current.Plan - - topologies, err := readIntegrationsServerTopologies(plan.ClusterTopology) - - if err != nil { - return nil, err - } - - if len(topologies) > 0 { - srv.InstanceConfigurationId = topologies[0].InstanceConfigurationId - srv.Size = topologies[0].Size - srv.SizeResource = topologies[0].SizeResource - srv.ZoneCount = 
topologies[0].ZoneCount - } - - srv.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID - - srv.HttpEndpoint, srv.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) - - cfg, err := readIntegrationsServerConfigs(plan.IntegrationsServer) - - if err != nil { - return nil, err - } - - srv.Config = cfg - - return &srv, nil -} - func (srv IntegrationsServerTF) Payload(ctx context.Context, payload models.IntegrationsServerPayload) (*models.IntegrationsServerPayload, diag.Diagnostics) { var diags diag.Diagnostics @@ -164,7 +90,7 @@ func IntegrationsServerPayload(ctx context.Context, srvObj types.Object, templat return nil, nil } - templatePayload := integrationsServerResource(template) + templatePayload := payloadFromTemplate(template) if templatePayload == nil { diags.AddError("integrations_server payload error", "integrations_server specified but deployment template is not configured for it. Use a different template if you wish to add integrations_server") @@ -180,9 +106,9 @@ func IntegrationsServerPayload(ctx context.Context, srvObj types.Object, templat return payload, nil } -// integrationsServerResource returns the IntegrationsServerPayload from a deployment +// payloadFromTemplate returns the IntegrationsServerPayload from a deployment // template or an empty version of the payload. 
-func integrationsServerResource(template *models.DeploymentTemplateInfoV2) *models.IntegrationsServerPayload { +func payloadFromTemplate(template *models.DeploymentTemplateInfoV2) *models.IntegrationsServerPayload { if template == nil || len(template.DeploymentTemplate.Resources.IntegrationsServer) == 0 { return nil } diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read.go new file mode 100644 index 000000000..d284000bb --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" +) + +type IntegrationsServer struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + Config *IntegrationsServerConfig `tfsdk:"config"` +} + +func ReadIntegrationsServers(in []*models.IntegrationsServerResourceInfo) (*IntegrationsServer, error) { + for _, model := range in { + if util.IsCurrentIntegrationsServerPlanEmpty(model) || utils.IsIntegrationsServerResourceStopped(model) { + continue + } + + srv, err := readIntegrationsServer(model) + if err != nil { + return nil, err + } + + return srv, nil + } + + return nil, nil +} + +func readIntegrationsServer(in *models.IntegrationsServerResourceInfo) (*IntegrationsServer, error) { + + var srv IntegrationsServer + + srv.RefId = in.RefID + + srv.ResourceId = in.Info.ID + + srv.Region = in.Region + + plan := in.Info.PlanInfo.Current.Plan + + topologies, err := readIntegrationsServerTopologies(plan.ClusterTopology) + + if err != nil { + return nil, err + } + + if len(topologies) > 0 { + srv.InstanceConfigurationId = topologies[0].InstanceConfigurationId + srv.Size = topologies[0].Size + srv.SizeResource = topologies[0].SizeResource + srv.ZoneCount = topologies[0].ZoneCount + } + + srv.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID + + srv.HttpEndpoint, srv.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) 
+ + cfg, err := readIntegrationsServerConfigs(plan.IntegrationsServer) + + if err != nil { + return nil, err + } + + srv.Config = cfg + + return &srv, nil +} From a312dcaad33b425c93c068158e03735ef05e9c2e Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 22 Dec 2022 14:53:18 +0100 Subject: [PATCH 044/104] split kibana files to read and payload --- .../v2/{kibana.go => kibana_payload.go} | 78 +-------------- .../kibana/v2/kibana_read.go | 94 +++++++++++++++++++ 2 files changed, 97 insertions(+), 75 deletions(-) rename ec/ecresource/deploymentresource/kibana/v2/{kibana.go => kibana_payload.go} (63%) create mode 100644 ec/ecresource/deploymentresource/kibana/v2/kibana_read.go diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_payload.go similarity index 63% rename from ec/ecresource/deploymentresource/kibana/v2/kibana.go rename to ec/ecresource/deploymentresource/kibana/v2/kibana_payload.go index 0b9d02f5b..d3564231f 100644 --- a/ec/ecresource/deploymentresource/kibana/v2/kibana.go +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_payload.go @@ -23,9 +23,6 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v1" topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" - "github.com/elastic/terraform-provider-ec/ec/internal/converters" - "github.com/elastic/terraform-provider-ec/ec/internal/util" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" @@ -45,75 +42,6 @@ type KibanaTF struct { Config types.Object `tfsdk:"config"` } -type Kibana struct { - ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` - RefId *string `tfsdk:"ref_id"` - 
ResourceId *string `tfsdk:"resource_id"` - Region *string `tfsdk:"region"` - HttpEndpoint *string `tfsdk:"http_endpoint"` - HttpsEndpoint *string `tfsdk:"https_endpoint"` - InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` - Size *string `tfsdk:"size"` - SizeResource *string `tfsdk:"size_resource"` - ZoneCount int `tfsdk:"zone_count"` - Config *KibanaConfig `tfsdk:"config"` -} - -func ReadKibanas(in []*models.KibanaResourceInfo) (*Kibana, error) { - for _, model := range in { - if util.IsCurrentKibanaPlanEmpty(model) || utils.IsKibanaResourceStopped(model) { - continue - } - - kibana, err := ReadKibana(model) - if err != nil { - return nil, err - } - - return kibana, nil - } - - return nil, nil -} - -func ReadKibana(in *models.KibanaResourceInfo) (*Kibana, error) { - var kibana Kibana - - kibana.RefId = in.RefID - - kibana.ResourceId = in.Info.ClusterID - - kibana.Region = in.Region - - plan := in.Info.PlanInfo.Current.Plan - var err error - - topologies, err := readKibanaTopologies(plan.ClusterTopology) - if err != nil { - return nil, err - } - - if len(topologies) > 0 { - kibana.InstanceConfigurationId = topologies[0].InstanceConfigurationId - kibana.Size = topologies[0].Size - kibana.SizeResource = topologies[0].SizeResource - kibana.ZoneCount = topologies[0].ZoneCount - } - - kibana.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID - - kibana.HttpEndpoint, kibana.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) - - config, err := readKibanaConfig(plan.Kibana) - if err != nil { - return nil, err - } - - kibana.Config = config - - return &kibana, nil -} - func (kibana KibanaTF) Payload(ctx context.Context, payload models.KibanaPayload) (*models.KibanaPayload, diag.Diagnostics) { var diags diag.Diagnostics @@ -172,7 +100,7 @@ func KibanaPayload(ctx context.Context, kibanaObj types.Object, template *models return nil, nil } - templatePlayload := kibanaResource(template) + templatePlayload := payloadFromTemplate(template) if 
templatePlayload == nil { diags.AddError("kibana payload error", "kibana specified but deployment template is not configured for it. Use a different template if you wish to add kibana") @@ -188,9 +116,9 @@ func KibanaPayload(ctx context.Context, kibanaObj types.Object, template *models return payload, nil } -// kibanaResource returns the KibanaPayload from a deployment +// payloadFromTemplate returns the KibanaPayload from a deployment // template or an empty version of the payload. -func kibanaResource(res *models.DeploymentTemplateInfoV2) *models.KibanaPayload { +func payloadFromTemplate(res *models.DeploymentTemplateInfoV2) *models.KibanaPayload { if res == nil || len(res.DeploymentTemplate.Resources.Kibana) == 0 { return nil } diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana_read.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_read.go new file mode 100644 index 000000000..0fc03902f --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_read.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" +) + +type Kibana struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + Config *KibanaConfig `tfsdk:"config"` +} + +func ReadKibanas(in []*models.KibanaResourceInfo) (*Kibana, error) { + for _, model := range in { + if util.IsCurrentKibanaPlanEmpty(model) || utils.IsKibanaResourceStopped(model) { + continue + } + + kibana, err := readKibana(model) + if err != nil { + return nil, err + } + + return kibana, nil + } + + return nil, nil +} + +func readKibana(in *models.KibanaResourceInfo) (*Kibana, error) { + var kibana Kibana + + kibana.RefId = in.RefID + + kibana.ResourceId = in.Info.ClusterID + + kibana.Region = in.Region + + plan := in.Info.PlanInfo.Current.Plan + var err error + + topologies, err := readKibanaTopologies(plan.ClusterTopology) + if err != nil { + return nil, err + } + + if len(topologies) > 0 { + kibana.InstanceConfigurationId = topologies[0].InstanceConfigurationId + kibana.Size = topologies[0].Size + kibana.SizeResource = topologies[0].SizeResource + kibana.ZoneCount = topologies[0].ZoneCount + } + + kibana.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID + + kibana.HttpEndpoint, kibana.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) + + config, err := readKibanaConfig(plan.Kibana) + if err != nil { + return nil, err + } + + 
kibana.Config = config + + return &kibana, nil +} From 3000ee30e8248ffb61bd7d4550a6ba3e04a74c68 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 22 Dec 2022 14:57:00 +0100 Subject: [PATCH 045/104] split observability files to read and payload --- ...ervability.go => observability_payload.go} | 37 ------------ .../observability/v2/observability_read.go | 58 +++++++++++++++++++ 2 files changed, 58 insertions(+), 37 deletions(-) rename ec/ecresource/deploymentresource/observability/v2/{observability.go => observability_payload.go} (74%) create mode 100644 ec/ecresource/deploymentresource/observability/v2/observability_read.go diff --git a/ec/ecresource/deploymentresource/observability/v2/observability.go b/ec/ecresource/deploymentresource/observability/v2/observability_payload.go similarity index 74% rename from ec/ecresource/deploymentresource/observability/v2/observability.go rename to ec/ecresource/deploymentresource/observability/v2/observability_payload.go index 3a5006a5d..1fbfd62c5 100644 --- a/ec/ecresource/deploymentresource/observability/v2/observability.go +++ b/ec/ecresource/deploymentresource/observability/v2/observability_payload.go @@ -33,10 +33,6 @@ import ( type ObservabilityTF = v1.ObservabilityTF -type Observability = v1.Observability - -type Observabilities []Observability - func ObservabilityPayload(ctx context.Context, obsObj types.Object, client *api.API) (*models.DeploymentObservabilitySettings, diag.Diagnostics) { var observability *ObservabilityTF @@ -95,36 +91,3 @@ func ObservabilityPayload(ctx context.Context, obsObj types.Object, client *api. return &payload, nil } - -func ReadObservability(in *models.DeploymentSettings) (*Observability, error) { - if in == nil || in.Observability == nil { - return nil, nil - } - - var obs Observability - - // We are only accepting a single deployment ID and refID for both logs and metrics. - // If either of them is not nil the deployment ID and refID will be filled. 
- if in.Observability.Metrics != nil { - if in.Observability.Metrics.Destination.DeploymentID != nil { - obs.DeploymentId = in.Observability.Metrics.Destination.DeploymentID - } - - obs.RefId = &in.Observability.Metrics.Destination.RefID - obs.Metrics = true - } - - if in.Observability.Logging != nil { - if in.Observability.Logging.Destination.DeploymentID != nil { - obs.DeploymentId = in.Observability.Logging.Destination.DeploymentID - } - obs.RefId = &in.Observability.Logging.Destination.RefID - obs.Logs = true - } - - if obs == (Observability{}) { - return nil, nil - } - - return &obs, nil -} diff --git a/ec/ecresource/deploymentresource/observability/v2/observability_read.go b/ec/ecresource/deploymentresource/observability/v2/observability_read.go new file mode 100644 index 000000000..fa1c78cbb --- /dev/null +++ b/ec/ecresource/deploymentresource/observability/v2/observability_read.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "github.com/elastic/cloud-sdk-go/pkg/models" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v1" +) + +type Observability = v1.Observability + +func ReadObservability(in *models.DeploymentSettings) (*Observability, error) { + if in == nil || in.Observability == nil { + return nil, nil + } + + var obs Observability + + // We are only accepting a single deployment ID and refID for both logs and metrics. + // If either of them is not nil the deployment ID and refID will be filled. + if in.Observability.Metrics != nil { + if in.Observability.Metrics.Destination.DeploymentID != nil { + obs.DeploymentId = in.Observability.Metrics.Destination.DeploymentID + } + + obs.RefId = &in.Observability.Metrics.Destination.RefID + obs.Metrics = true + } + + if in.Observability.Logging != nil { + if in.Observability.Logging.Destination.DeploymentID != nil { + obs.DeploymentId = in.Observability.Logging.Destination.DeploymentID + } + obs.RefId = &in.Observability.Logging.Destination.RefID + obs.Logs = true + } + + if obs == (Observability{}) { + return nil, nil + } + + return &obs, nil +} From 4ae31c98db5804d0dc6331938690fddecc707bfd Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 22 Dec 2022 15:01:58 +0100 Subject: [PATCH 046/104] fix description --- ec/ecdatasource/deploymentdatasource/schema_kibana.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ec/ecdatasource/deploymentdatasource/schema_kibana.go b/ec/ecdatasource/deploymentdatasource/schema_kibana.go index c127d659b..63468f12b 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_kibana.go +++ b/ec/ecdatasource/deploymentdatasource/schema_kibana.go @@ -32,7 +32,7 @@ func kibanaResourceInfoSchema() tfsdk.Attribute { Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ "elasticsearch_cluster_ref_id": { Type: types.StringType, - Description: "The user-specified ID of the Elasticsearch cluster to which this 
resource kind will link.", + Description: "A locally-unique friendly alias for an Elasticsearch resource in this deployment.", Computed: true, }, "healthy": { From a624e68c9434d908e4fa8764d9addbed5d886587 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 22 Dec 2022 15:58:33 +0100 Subject: [PATCH 047/104] renaming and removing obsolete code --- .../deploymentresource/apm/v2/apm_payload.go | 4 ++-- .../deploymentresource/elasticsearch/v1/schema.go | 9 --------- .../elasticsearch/v2/elasticsearch_config.go | 4 ++-- .../elasticsearch/v2/elasticsearch_extension.go | 10 +++++----- .../elasticsearch/v2/elasticsearch_payload.go | 12 +++--------- .../elasticsearch/v2/elasticsearch_read.go | 8 ++++---- .../elasticsearch/v2/elasticsearch_read_test.go | 2 +- .../enterprisesearch/v2/enterprise_search_payload.go | 4 ++-- .../v2/integrations_server_payload.go | 4 ++-- .../deploymentresource/kibana/v2/kibana_payload.go | 4 ++-- 10 files changed, 23 insertions(+), 38 deletions(-) diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_payload.go b/ec/ecresource/deploymentresource/apm/v2/apm_payload.go index 54327326f..04e5a7ac7 100644 --- a/ec/ecresource/deploymentresource/apm/v2/apm_payload.go +++ b/ec/ecresource/deploymentresource/apm/v2/apm_payload.go @@ -42,7 +42,7 @@ type ApmTF struct { Config types.Object `tfsdk:"config"` } -func (apm ApmTF) Payload(ctx context.Context, payload models.ApmPayload) (*models.ApmPayload, diag.Diagnostics) { +func (apm ApmTF) payload(ctx context.Context, payload models.ApmPayload) (*models.ApmPayload, diag.Diagnostics) { var diags diag.Diagnostics if !apm.ElasticsearchClusterRefId.IsNull() { @@ -107,7 +107,7 @@ func ApmPayload(ctx context.Context, apmObj types.Object, template *models.Deplo return nil, diags } - payload, diags := apm.Payload(ctx, *templatePayload) + payload, diags := apm.payload(ctx, *templatePayload) if diags.HasError() { return nil, diags diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go 
b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go index 51bb78f0a..4aac61fe8 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go @@ -18,15 +18,12 @@ package v1 import ( - "context" "strings" "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" @@ -87,12 +84,6 @@ func ElasticsearchSchema() tfsdk.Attribute { Type: types.StringType, Description: "The encoded Elasticsearch credentials to use in Beats or Logstash", Computed: true, - PlanModifiers: tfsdk.AttributePlanModifiers{ - resource.UseStateForUnknown(), - resource.RequiresReplaceIf(func(ctx context.Context, state, config attr.Value, path path.Path) (bool, diag.Diagnostics) { - return true, nil - }, "", ""), - }, }, "http_endpoint": { Type: types.StringType, diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_config.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_config.go index 4f444b474..b85298032 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_config.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_config.go @@ -37,7 +37,7 @@ func (c *ElasticsearchConfig) IsEmpty() bool { return c == nil || reflect.ValueOf(*c).IsZero() } -func ReadElasticsearchConfig(in *models.ElasticsearchConfiguration) (*ElasticsearchConfig, error) { +func readElasticsearchConfig(in *models.ElasticsearchConfiguration) (*ElasticsearchConfig, error) { 
var config ElasticsearchConfig if in == nil { @@ -75,7 +75,7 @@ func ReadElasticsearchConfig(in *models.ElasticsearchConfiguration) (*Elasticsea return &config, nil } -func ElasticsearchConfigPayload(ctx context.Context, cfgObj attr.Value, model *models.ElasticsearchConfiguration) (*models.ElasticsearchConfiguration, diag.Diagnostics) { +func elasticsearchConfigPayload(ctx context.Context, cfgObj attr.Value, model *models.ElasticsearchConfiguration) (*models.ElasticsearchConfiguration, diag.Diagnostics) { if cfgObj.IsNull() || cfgObj.IsUnknown() { return model, nil } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_extension.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_extension.go index 8371d6e04..c0faa0392 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_extension.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_extension.go @@ -30,7 +30,7 @@ import ( type ElasticsearchExtensions v1.ElasticsearchExtensions -func ReadElasticsearchExtensions(in *models.ElasticsearchConfiguration) (ElasticsearchExtensions, error) { +func readElasticsearchExtensions(in *models.ElasticsearchConfiguration) (ElasticsearchExtensions, error) { if len(in.UserBundles) == 0 && len(in.UserPlugins) == 0 { return nil, nil } @@ -38,7 +38,7 @@ func ReadElasticsearchExtensions(in *models.ElasticsearchConfiguration) (Elastic extensions := make(ElasticsearchExtensions, 0, len(in.UserBundles)+len(in.UserPlugins)) for _, model := range in.UserBundles { - extension, err := ReadFromUserBundle(model) + extension, err := readFromUserBundle(model) if err != nil { return nil, err } @@ -47,7 +47,7 @@ func ReadElasticsearchExtensions(in *models.ElasticsearchConfiguration) (Elastic } for _, model := range in.UserPlugins { - extension, err := ReadFromUserPlugin(model) + extension, err := readFromUserPlugin(model) if err != nil { return nil, err } @@ -89,7 +89,7 @@ func elasticsearchExtensionPayload(ctx 
context.Context, extensions types.Set, es return nil } -func ReadFromUserBundle(in *models.ElasticsearchUserBundle) (*v1.ElasticsearchExtension, error) { +func readFromUserBundle(in *models.ElasticsearchUserBundle) (*v1.ElasticsearchExtension, error) { var ext v1.ElasticsearchExtension ext.Type = "bundle" @@ -112,7 +112,7 @@ func ReadFromUserBundle(in *models.ElasticsearchUserBundle) (*v1.ElasticsearchEx return &ext, nil } -func ReadFromUserPlugin(in *models.ElasticsearchUserPlugin) (*v1.ElasticsearchExtension, error) { +func readFromUserPlugin(in *models.ElasticsearchUserPlugin) (*v1.ElasticsearchExtension, error) { var ext v1.ElasticsearchExtension ext.Type = "plugin" diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go index a9fbddb3b..7464075df 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go @@ -69,15 +69,9 @@ func ElasticsearchPayload(ctx context.Context, esObj types.Object, template *mod return nil, nil } - if es == nil { - var diags diag.Diagnostics - diags.AddError("Elasticsearch payload error", "cannot find elasticsearch data") - return nil, diags - } - templatePayload := utils.EnrichElasticsearchTemplate(utils.EsResource(template), dtID, version, useNodeRoles) - payload, diags := es.Payload(ctx, templatePayload, skipTopologies) + payload, diags := es.payload(ctx, templatePayload, skipTopologies) if diags.HasError() { return nil, diags } @@ -85,7 +79,7 @@ func ElasticsearchPayload(ctx context.Context, esObj types.Object, template *mod return payload, nil } -func (es *ElasticsearchTF) Payload(ctx context.Context, res *models.ElasticsearchPayload, skipTopologies bool) (*models.ElasticsearchPayload, diag.Diagnostics) { +func (es *ElasticsearchTF) payload(ctx context.Context, res *models.ElasticsearchPayload, skipTopologies bool) 
(*models.ElasticsearchPayload, diag.Diagnostics) { var diags diag.Diagnostics if !es.RefId.IsNull() { @@ -110,7 +104,7 @@ func (es *ElasticsearchTF) Payload(ctx context.Context, res *models.Elasticsearc // list when these are set as a dedicated tier as a topology element. updateNodeRolesOnDedicatedTiers(res.Plan.ClusterTopology) - res.Plan.Elasticsearch, ds = ElasticsearchConfigPayload(ctx, es.Config, res.Plan.Elasticsearch) + res.Plan.Elasticsearch, ds = elasticsearchConfigPayload(ctx, es.Config, res.Plan.Elasticsearch) diags.Append(ds...) diags.Append(elasticsearchSnapshotSourcePayload(ctx, es.SnapshotSource, res.Plan)...) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go index 659d6c55e..4356ba1f3 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go @@ -57,7 +57,7 @@ func ReadElasticsearches(in []*models.ElasticsearchResourceInfo, remotes *models if util.IsCurrentEsPlanEmpty(model) || utils.IsEsResourceStopped(model) { continue } - es, err := ReadElasticsearch(model, remotes) + es, err := readElasticsearch(model, remotes) if err != nil { return nil, err } @@ -67,7 +67,7 @@ func ReadElasticsearches(in []*models.ElasticsearchResourceInfo, remotes *models return nil, nil } -func ReadElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.RemoteResources) (*Elasticsearch, error) { +func readElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.RemoteResources) (*Elasticsearch, error) { var es Elasticsearch if util.IsCurrentEsPlanEmpty(in) || utils.IsEsResourceStopped(in) { @@ -105,7 +105,7 @@ func ReadElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.Rem es.HttpEndpoint, es.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) - es.Config, err = ReadElasticsearchConfig(plan.Elasticsearch) + 
es.Config, err = readElasticsearchConfig(plan.Elasticsearch) if err != nil { return nil, err } @@ -116,7 +116,7 @@ func ReadElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.Rem } es.RemoteCluster = clusters - extensions, err := ReadElasticsearchExtensions(plan.Elasticsearch) + extensions, err := readElasticsearchExtensions(plan.Elasticsearch) if err != nil { return nil, err } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go index 3623c11e5..031263b2e 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go @@ -421,7 +421,7 @@ func Test_readElasticsearchConfig(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := ReadElasticsearchConfig(tt.args.cfg) + got, err := readElasticsearchConfig(tt.args.cfg) assert.Nil(t, err) assert.Equal(t, tt.want, got) diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload.go index d8e117db3..a1ab40cd6 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload.go @@ -44,7 +44,7 @@ type EnterpriseSearchTF struct { Config types.Object `tfsdk:"config"` } -func (es *EnterpriseSearchTF) Payload(ctx context.Context, payload models.EnterpriseSearchPayload) (*models.EnterpriseSearchPayload, diag.Diagnostics) { +func (es *EnterpriseSearchTF) payload(ctx context.Context, payload models.EnterpriseSearchPayload) (*models.EnterpriseSearchPayload, diag.Diagnostics) { var diags diag.Diagnostics if !es.ElasticsearchClusterRefId.IsNull() { @@ -115,7 +115,7 @@ func EnterpriseSearchesPayload(ctx context.Context, esObj 
types.Object, template return nil, diags } - payload, diags := es.Payload(ctx, *templatePayload) + payload, diags := es.payload(ctx, *templatePayload) if diags.HasError() { return nil, diags diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload.go index b3f125551..67833eb15 100644 --- a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload.go +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload.go @@ -41,7 +41,7 @@ type IntegrationsServerTF struct { Config types.Object `tfsdk:"config"` } -func (srv IntegrationsServerTF) Payload(ctx context.Context, payload models.IntegrationsServerPayload) (*models.IntegrationsServerPayload, diag.Diagnostics) { +func (srv IntegrationsServerTF) payload(ctx context.Context, payload models.IntegrationsServerPayload) (*models.IntegrationsServerPayload, diag.Diagnostics) { var diags diag.Diagnostics if !srv.ElasticsearchClusterRefId.IsNull() { @@ -97,7 +97,7 @@ func IntegrationsServerPayload(ctx context.Context, srvObj types.Object, templat return nil, diags } - payload, diags := srv.Payload(ctx, *templatePayload) + payload, diags := srv.payload(ctx, *templatePayload) if diags.HasError() { return nil, diags diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana_payload.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_payload.go index d3564231f..f32177aea 100644 --- a/ec/ecresource/deploymentresource/kibana/v2/kibana_payload.go +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_payload.go @@ -42,7 +42,7 @@ type KibanaTF struct { Config types.Object `tfsdk:"config"` } -func (kibana KibanaTF) Payload(ctx context.Context, payload models.KibanaPayload) (*models.KibanaPayload, diag.Diagnostics) { +func (kibana KibanaTF) payload(ctx context.Context, payload models.KibanaPayload) (*models.KibanaPayload, diag.Diagnostics) { var diags 
diag.Diagnostics if !kibana.ElasticsearchClusterRefId.IsNull() { @@ -107,7 +107,7 @@ func KibanaPayload(ctx context.Context, kibanaObj types.Object, template *models return nil, diags } - payload, diags := kibanaTF.Payload(ctx, *templatePlayload) + payload, diags := kibanaTF.payload(ctx, *templatePlayload) if diags.HasError() { return nil, diags From 4edcc2d17cf0cfeb76f3236f453c9d3e5e384636 Mon Sep 17 00:00:00 2001 From: Dmitry Onishchenko <8962171+dimuon@users.noreply.github.com> Date: Thu, 22 Dec 2022 18:34:20 +0100 Subject: [PATCH 048/104] Apply suggestions from code review Co-authored-by: Toby Brain --- .../deploymentresource/utils/getters.go | 17 ++++++++--------- .../utils/node_types_to_node_roles.go | 4 ---- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/ec/ecresource/deploymentresource/utils/getters.go b/ec/ecresource/deploymentresource/utils/getters.go index 2441d8875..6a8d3fbee 100644 --- a/ec/ecresource/deploymentresource/utils/getters.go +++ b/ec/ecresource/deploymentresource/utils/getters.go @@ -28,35 +28,34 @@ import ( ) func HasRunningResources(res *models.DeploymentGetResponse) bool { - var hasRunning bool if res.Resources != nil { for _, r := range res.Resources.Elasticsearch { if !IsEsResourceStopped(r) { - hasRunning = true + return true } } for _, r := range res.Resources.Kibana { if !IsKibanaResourceStopped(r) { - hasRunning = true + return true } } for _, r := range res.Resources.Apm { if !IsApmResourceStopped(r) { - hasRunning = true + return true } } for _, r := range res.Resources.EnterpriseSearch { if !IsEssResourceStopped(r) { - hasRunning = true + return true } } for _, r := range res.Resources.IntegrationsServer { if !IsIntegrationsServerResourceStopped(r) { - hasRunning = true + return true } } } - return hasRunning + return false } func GetDeploymentTemplateID(res *models.DeploymentResources) (string, error) { @@ -94,14 +93,14 @@ func GetDeploymentTemplateID(res *models.DeploymentResources) (string, error) { return 
deploymentTemplateID, nil } -func GetRegion(res *models.DeploymentResources) (region string) { +func GetRegion(res *models.DeploymentResources) (string) { for _, r := range res.Elasticsearch { if r.Region != nil && *r.Region != "" { return *r.Region } } - return region + return "" } func GetLowestVersion(res *models.DeploymentResources) (string, error) { diff --git a/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go b/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go index ec1a70ff9..85134f04a 100644 --- a/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go +++ b/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go @@ -67,10 +67,6 @@ func LegacyToNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagn return true, nil } - // If the previous version is empty, node_roles should be used. - if stateVersion.Value == "" { - return true, nil - } var diags diag.Diagnostics oldV, err := semver.Parse(stateVersion.Value) From 39a1c435dd0669abf3ffabad205fdfed338e28e0 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 22 Dec 2022 18:13:40 +0100 Subject: [PATCH 049/104] endpoints extractions: use the converter func instead of the one from util --- .../deploymentdatasource/flatteners_apm.go | 7 ++---- .../flatteners_elasticsearch.go | 8 ++---- .../flatteners_enterprise_search.go | 7 ++---- .../flatteners_integrations_server.go | 7 ++---- .../deploymentdatasource/flatteners_kibana.go | 7 ++---- .../extract_endpoint_test.go} | 25 +++++++++++-------- ec/internal/util/helpers.go | 21 ---------------- 7 files changed, 25 insertions(+), 57 deletions(-) rename ec/internal/{util/helpers_test.go => converters/extract_endpoint_test.go} (75%) diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_apm.go b/ec/ecdatasource/deploymentdatasource/flatteners_apm.go index 54152e366..c30160feb 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_apm.go +++ 
b/ec/ecdatasource/deploymentdatasource/flatteners_apm.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) @@ -72,11 +73,7 @@ func flattenApmResources(ctx context.Context, in []*models.ApmResourceInfo, targ } if res.Info.Metadata != nil { - endpoints := util.FlattenClusterEndpoint(res.Info.Metadata) - if endpoints != nil { - model.HttpEndpoint = types.String{Value: endpoints["http_endpoint"].(string)} - model.HttpsEndpoint = types.String{Value: endpoints["https_endpoint"].(string)} - } + model.HttpEndpoint, model.HttpsEndpoint = converters.ExtractEndpointsToTypes(res.Info.Metadata) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go index 27792bc08..1e4fb5734 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go @@ -29,6 +29,7 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) @@ -76,12 +77,7 @@ func flattenElasticsearchResources(ctx context.Context, in []*models.Elasticsear if res.Info.Metadata != nil { model.CloudID = types.String{Value: res.Info.Metadata.CloudID} - - endpoints := util.FlattenClusterEndpoint(res.Info.Metadata) - if endpoints != nil { - model.HttpEndpoint = types.String{Value: endpoints["http_endpoint"].(string)} - model.HttpsEndpoint = types.String{Value: endpoints["https_endpoint"].(string)} - } + model.HttpEndpoint, model.HttpsEndpoint = converters.ExtractEndpointsToTypes(res.Info.Metadata) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go index 34519abb6..29c102c6c 
100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) @@ -72,11 +73,7 @@ func flattenEnterpriseSearchResources(ctx context.Context, in []*models.Enterpri } if res.Info.Metadata != nil { - endpoints := util.FlattenClusterEndpoint(res.Info.Metadata) - if endpoints != nil { - model.HttpEndpoint = types.String{Value: endpoints["http_endpoint"].(string)} - model.HttpsEndpoint = types.String{Value: endpoints["https_endpoint"].(string)} - } + model.HttpEndpoint, model.HttpsEndpoint = converters.ExtractEndpointsToTypes(res.Info.Metadata) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go index 7b1ab6c0f..1ba715d81 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) @@ -72,11 +73,7 @@ func flattenIntegrationsServerResources(ctx context.Context, in []*models.Integr } if res.Info.Metadata != nil { - endpoints := util.FlattenClusterEndpoint(res.Info.Metadata) - if endpoints != nil { - model.HttpEndpoint = types.String{Value: endpoints["http_endpoint"].(string)} - model.HttpsEndpoint = types.String{Value: endpoints["https_endpoint"].(string)} - } + model.HttpEndpoint, model.HttpsEndpoint = converters.ExtractEndpointsToTypes(res.Info.Metadata) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go 
b/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go index b5a427cc6..585498e5f 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) @@ -72,11 +73,7 @@ func flattenKibanaResources(ctx context.Context, in []*models.KibanaResourceInfo } if res.Info.Metadata != nil { - endpoints := util.FlattenClusterEndpoint(res.Info.Metadata) - if endpoints != nil { - model.HttpEndpoint = types.String{Value: endpoints["http_endpoint"].(string)} - model.HttpsEndpoint = types.String{Value: endpoints["https_endpoint"].(string)} - } + model.HttpEndpoint, model.HttpsEndpoint = converters.ExtractEndpointsToTypes(res.Info.Metadata) } } diff --git a/ec/internal/util/helpers_test.go b/ec/internal/converters/extract_endpoint_test.go similarity index 75% rename from ec/internal/util/helpers_test.go rename to ec/internal/converters/extract_endpoint_test.go index bee83a1e5..54c89313b 100644 --- a/ec/internal/util/helpers_test.go +++ b/ec/internal/converters/extract_endpoint_test.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
-package util +package converters import ( "testing" @@ -30,10 +30,14 @@ func TestFlattenClusterEndpoint(t *testing.T) { type args struct { metadata *models.ClusterMetadataInfo } + type want struct { + httpEndpoint *string + httpsEndpoint *string + } tests := []struct { name string args args - want map[string]interface{} + want want }{ { name: "returns nil when the endpoint info is empty", @@ -48,9 +52,9 @@ func TestFlattenClusterEndpoint(t *testing.T) { HTTPS: ec.Int32(9243), }, }}, - want: map[string]interface{}{ - "http_endpoint": "http://xyz.us-east-1.aws.found.io:9200", - "https_endpoint": "https://xyz.us-east-1.aws.found.io:9243", + want: want{ + httpEndpoint: ec.String("http://xyz.us-east-1.aws.found.io:9200"), + httpsEndpoint: ec.String("https://xyz.us-east-1.aws.found.io:9243"), }, }, { @@ -62,16 +66,17 @@ func TestFlattenClusterEndpoint(t *testing.T) { HTTPS: ec.Int32(20000), }, }}, - want: map[string]interface{}{ - "http_endpoint": "http://rst.us-east-1.aws.found.io:10000", - "https_endpoint": "https://rst.us-east-1.aws.found.io:20000", + want: want{ + httpEndpoint: ec.String("http://rst.us-east-1.aws.found.io:10000"), + httpsEndpoint: ec.String("https://rst.us-east-1.aws.found.io:20000"), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := FlattenClusterEndpoint(tt.args.metadata) - assert.Equal(t, tt.want, got) + httpEndpoint, httpsEndpoint := ExtractEndpoints(tt.args.metadata) + assert.Equal(t, tt.want.httpEndpoint, httpEndpoint) + assert.Equal(t, tt.want.httpsEndpoint, httpsEndpoint) }) } } diff --git a/ec/internal/util/helpers.go b/ec/internal/util/helpers.go index 826fb40dd..d0f892ea8 100644 --- a/ec/internal/util/helpers.go +++ b/ec/internal/util/helpers.go @@ -18,7 +18,6 @@ package util import ( - "fmt" "os" "strconv" @@ -31,26 +30,6 @@ import ( // used in tests var GetEnv = os.Getenv -// FlattenClusterEndpoint receives a ClusterMetadataInfo, parses the http and -// https endpoints and returns a map with two keys: 
`http_endpoint` and -// `https_endpoint` -func FlattenClusterEndpoint(metadata *models.ClusterMetadataInfo) map[string]interface{} { - if metadata == nil || metadata.Endpoint == "" || metadata.Ports == nil { - return nil - } - - var m = make(map[string]interface{}) - if metadata.Ports.HTTP != nil { - m["http_endpoint"] = fmt.Sprintf("http://%s:%d", metadata.Endpoint, *metadata.Ports.HTTP) - } - - if metadata.Ports.HTTPS != nil { - m["https_endpoint"] = fmt.Sprintf("https://%s:%d", metadata.Endpoint, *metadata.Ports.HTTPS) - } - - return m -} - // IsCurrentEsPlanEmpty checks that the elasticsearch resource current plan is empty. func IsCurrentEsPlanEmpty(res *models.ElasticsearchResourceInfo) bool { return res.Info == nil || res.Info.PlanInfo == nil || From ebc05cf85de5255859c40b746291576fe0693a0d Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 22 Dec 2022 18:26:34 +0100 Subject: [PATCH 050/104] moving code around and some renaming --- ec/ecresource/deploymentresource/create.go | 8 ++-- ec/ecresource/deploymentresource/delete.go | 31 ------------- ec/ecresource/deploymentresource/update.go | 29 ++++++++++++ .../deploymentresource/utils/definitions.go | 1 - .../deploymentresource/utils/get_first.go | 44 ------------------- .../utils/node_types_to_node_roles.go | 6 +-- 6 files changed, 36 insertions(+), 83 deletions(-) delete mode 100644 ec/ecresource/deploymentresource/utils/get_first.go diff --git a/ec/ecresource/deploymentresource/create.go b/ec/ecresource/deploymentresource/create.go index 447c3e6b9..5930e3761 100644 --- a/ec/ecresource/deploymentresource/create.go +++ b/ec/ecresource/deploymentresource/create.go @@ -22,7 +22,7 @@ import ( "fmt" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" - deploymentv "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" 
"github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-log/tflog" ) @@ -32,14 +32,14 @@ func (r *Resource) Create(ctx context.Context, req resource.CreateRequest, resp return } - var config deploymentv.DeploymentTF + var config v2.DeploymentTF diags := req.Config.Get(ctx, &config) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } - var plan deploymentv.DeploymentTF + var plan v2.DeploymentTF diags = req.Plan.Get(ctx, &plan) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { @@ -78,7 +78,7 @@ func (r *Resource) Create(ctx context.Context, req resource.CreateRequest, resp tflog.Trace(ctx, "created deployment resource") - resp.Diagnostics.Append(deploymentv.HandleRemoteClusters(ctx, r.client, *res.ID, plan.Elasticsearch)...) + resp.Diagnostics.Append(v2.HandleRemoteClusters(ctx, r.client, *res.ID, plan.Elasticsearch)...) deployment, diags := r.read(ctx, *res.ID, nil, plan, res.Resources) diff --git a/ec/ecresource/deploymentresource/delete.go b/ec/ecresource/deploymentresource/delete.go index 7f3111338..beacc735c 100644 --- a/ec/ecresource/deploymentresource/delete.go +++ b/ec/ecresource/deploymentresource/delete.go @@ -21,12 +21,9 @@ import ( "context" "errors" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/elastic/cloud-sdk-go/pkg/client/deployments" deploymentv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" - "github.com/elastic/terraform-provider-ec/ec/internal/util" "github.com/hashicorp/terraform-plugin-framework/resource" ) @@ -73,31 +70,3 @@ func alreadyDestroyed(err error) bool { var destroyed *deployments.ShutdownDeploymentNotFound return errors.As(err, &destroyed) } - -func removeRule(ruleID, deploymentID string, client *api.API) error { - res, err := 
trafficfilterapi.Get(trafficfilterapi.GetParams{ - API: client, ID: ruleID, IncludeAssociations: true, - }) - - // If the rule is gone (403 or 404), return nil. - if err != nil { - if util.TrafficFilterNotFound(err) { - return nil - } - return err - } - - // If the rule is found, then delete the association. - for _, assoc := range res.Associations { - if deploymentID == *assoc.ID { - return trafficfilterapi.DeleteAssociation(trafficfilterapi.DeleteAssociationParams{ - API: client, - ID: ruleID, - EntityID: *assoc.ID, - EntityType: *assoc.EntityType, - }) - } - } - - return nil -} diff --git a/ec/ecresource/deploymentresource/update.go b/ec/ecresource/deploymentresource/update.go index c09ae49c9..29dfb2715 100644 --- a/ec/ecresource/deploymentresource/update.go +++ b/ec/ecresource/deploymentresource/update.go @@ -24,6 +24,7 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + "github.com/elastic/terraform-provider-ec/ec/internal/util" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/resource" ) @@ -166,3 +167,31 @@ func associateRule(ruleID, deploymentID string, client *api.API) error { } return nil } + +func removeRule(ruleID, deploymentID string, client *api.API) error { + res, err := trafficfilterapi.Get(trafficfilterapi.GetParams{ + API: client, ID: ruleID, IncludeAssociations: true, + }) + + // If the rule is gone (403 or 404), return nil. + if err != nil { + if util.TrafficFilterNotFound(err) { + return nil + } + return err + } + + // If the rule is found, then delete the association. 
+ for _, assoc := range res.Associations { + if deploymentID == *assoc.ID { + return trafficfilterapi.DeleteAssociation(trafficfilterapi.DeleteAssociationParams{ + API: client, + ID: ruleID, + EntityID: *assoc.ID, + EntityType: *assoc.EntityType, + }) + } + } + + return nil +} diff --git a/ec/ecresource/deploymentresource/utils/definitions.go b/ec/ecresource/deploymentresource/utils/definitions.go index 706c8bdd2..5fea5e34c 100644 --- a/ec/ecresource/deploymentresource/utils/definitions.go +++ b/ec/ecresource/deploymentresource/utils/definitions.go @@ -18,6 +18,5 @@ package utils const ( - // minimumEnterpriseSearchSize = 2048 MinimumZoneCount = 1 ) diff --git a/ec/ecresource/deploymentresource/utils/get_first.go b/ec/ecresource/deploymentresource/utils/get_first.go deleted file mode 100644 index eb882c19e..000000000 --- a/ec/ecresource/deploymentresource/utils/get_first.go +++ /dev/null @@ -1,44 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package utils - -import ( - "context" - - "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/tfsdk" - "github.com/hashicorp/terraform-plugin-framework/types" -) - -func GetFirst(ctx context.Context, list types.List, target any) diag.Diagnostics { - if list.IsNull() || list.IsUnknown() || len(list.Elems) == 0 { - return nil - } - - if list.Elems[0].IsUnknown() || list.Elems[0].IsNull() { - return nil - } - - diags := tfsdk.ValueAs(ctx, list.Elems[0], target) - - if diags.HasError() { - return diags - } - - return nil -} diff --git a/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go b/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go index 85134f04a..533156f61 100644 --- a/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go +++ b/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go @@ -39,7 +39,7 @@ func UseNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostic return false, diags } - convertLegacy, diags := LegacyToNodeRoles(stateVersion, planVersion) + convertLegacy, diags := legacyToNodeRoles(stateVersion, planVersion) if diags.HasError() { return false, diags @@ -57,12 +57,12 @@ func CompatibleWithNodeRoles(version string) (bool, error) { return deploymentVersion.GE(DataTiersVersion), nil } -// LegacyToNodeRoles returns true when the legacy "node_type_*" should be +// legacyToNodeRoles returns true when the legacy "node_type_*" should be // migrated over to node_roles. Which will be true when: // * The version field doesn't change. // * The version field changes but: // - The Elasticsearch.0.toplogy doesn't have any node_type_* set. 
-func LegacyToNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostics) { +func legacyToNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostics) { if stateVersion.Value == "" || stateVersion.Value == planVersion.Value { return true, nil } From 71c9660435aec5e8387abe56384660abd661c344 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 22 Dec 2022 18:38:13 +0100 Subject: [PATCH 051/104] fix lint errors --- ec/ecresource/deploymentresource/utils/getters.go | 2 +- .../deploymentresource/utils/node_types_to_node_roles.go | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/ec/ecresource/deploymentresource/utils/getters.go b/ec/ecresource/deploymentresource/utils/getters.go index 6a8d3fbee..0c63ca93b 100644 --- a/ec/ecresource/deploymentresource/utils/getters.go +++ b/ec/ecresource/deploymentresource/utils/getters.go @@ -93,7 +93,7 @@ func GetDeploymentTemplateID(res *models.DeploymentResources) (string, error) { return deploymentTemplateID, nil } -func GetRegion(res *models.DeploymentResources) (string) { +func GetRegion(res *models.DeploymentResources) string { for _, r := range res.Elasticsearch { if r.Region != nil && *r.Region != "" { return *r.Region diff --git a/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go b/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go index 533156f61..88e50c839 100644 --- a/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go +++ b/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go @@ -67,7 +67,6 @@ func legacyToNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagn return true, nil } - var diags diag.Diagnostics oldV, err := semver.Parse(stateVersion.Value) if err != nil { From cca7a753bd1aeffab480b3d78078562131f22f68 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 22 Dec 2022 18:46:36 +0100 Subject: [PATCH 052/104] renaming --- .../enterprisesearch/v2/enterprise_search_payload.go | 2 +- 
.../enterprisesearch/v2/enterprise_search_read.go | 2 +- .../enterprisesearch/v2/enterprise_search_topology.go | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload.go index a1ab40cd6..3d2aa556c 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload.go @@ -81,7 +81,7 @@ func (es *EnterpriseSearchTF) payload(ctx context.Context, payload models.Enterp NodeTypeWorker: es.NodeTypeWorker, } - topology, ds := enterpriseSearchTopologyPayload(ctx, topologyTF, defaultEssTopology(payload.Plan.ClusterTopology), 0) + topology, ds := enterpriseSearchTopologyPayload(ctx, topologyTF, defaultTopology(payload.Plan.ClusterTopology), 0) diags = append(diags, ds...) diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read.go index 998c2a52c..9262a429b 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read.go @@ -58,7 +58,7 @@ func ReadEnterpriseSearch(in *models.EnterpriseSearchResourceInfo) (*EnterpriseS plan := in.Info.PlanInfo.Current.Plan - topologies, err := ReadEnterpriseSearchTopologies(plan.ClusterTopology) + topologies, err := readEnterpriseSearchTopologies(plan.ClusterTopology) if err != nil { return nil, err diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go index 9c22a6d3c..35f6e846d 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go +++ 
b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go @@ -65,7 +65,7 @@ func readEnterpriseSearchTopology(in *models.EnterpriseSearchTopologyElement) (* return &topology, nil } -func ReadEnterpriseSearchTopologies(in []*models.EnterpriseSearchTopologyElement) (enterpriseSearchTopologies, error) { +func readEnterpriseSearchTopologies(in []*models.EnterpriseSearchTopologyElement) (enterpriseSearchTopologies, error) { if len(in) == 0 { return nil, nil } @@ -99,7 +99,7 @@ func enterpriseSearchTopologyPayload(ctx context.Context, topology v1.Enterprise icID = planModels[index].InstanceConfigurationID } - elem, err := matchEssTopology(icID, planModels) + elem, err := matchTopology(icID, planModels) if err != nil { diags.AddError("cannot match enterprise search topology", err.Error()) return nil, diags @@ -131,10 +131,10 @@ func enterpriseSearchTopologyPayload(ctx context.Context, topology v1.Enterprise return elem, nil } -// defaultApmTopology iterates over all the templated topology elements and +// defaultTopology iterates over all the templated topology elements and // sets the size to the default when the template size is smaller than the // deployment template default, the same is done on the ZoneCount. 
-func defaultEssTopology(topology []*models.EnterpriseSearchTopologyElement) []*models.EnterpriseSearchTopologyElement { +func defaultTopology(topology []*models.EnterpriseSearchTopologyElement) []*models.EnterpriseSearchTopologyElement { for _, t := range topology { if *t.Size.Value < minimumEnterpriseSearchSize || *t.Size.Value == 0 { t.Size.Value = ec.Int32(minimumEnterpriseSearchSize) @@ -147,7 +147,7 @@ func defaultEssTopology(topology []*models.EnterpriseSearchTopologyElement) []*m return topology } -func matchEssTopology(id string, topologies []*models.EnterpriseSearchTopologyElement) (*models.EnterpriseSearchTopologyElement, error) { +func matchTopology(id string, topologies []*models.EnterpriseSearchTopologyElement) (*models.EnterpriseSearchTopologyElement, error) { for _, t := range topologies { if t.InstanceConfigurationID == id { return t, nil From 42a7c92739ff703cd14fd12bced4e4e27967e5d4 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 22 Dec 2022 19:00:54 +0100 Subject: [PATCH 053/104] renaming and obsolete code removing --- .../elasticsearch/v2/elasticsearch_payload.go | 6 +-- .../elasticsearch/v2/elasticsearch_read.go | 10 ++--- .../v2/elasticsearch_read_test.go | 2 +- .../v2/elasticsearch_remote_cluster.go | 7 ++-- .../v2/elasticsearch_topology.go | 38 +++++++++---------- .../v2/elasticsearch_trust_account.go | 6 +-- .../v2/elasticsearch_trust_external.go | 10 ++--- 7 files changed, 37 insertions(+), 42 deletions(-) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go index 7464075df..73bf45865 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go @@ -120,10 +120,10 @@ func (es *ElasticsearchTF) payload(ctx context.Context, res *models.Elasticsearc } } - res.Settings, ds = ElasticsearchTrustAccountPayload(ctx, 
es.TrustAccount, res.Settings) + res.Settings, ds = elasticsearchTrustAccountPayload(ctx, es.TrustAccount, res.Settings) diags.Append(ds...) - res.Settings, ds = ElasticsearchTrustExternalPayload(ctx, es.TrustExternal, res.Settings) + res.Settings, ds = elasticsearchTrustExternalPayload(ctx, es.TrustExternal, res.Settings) diags.Append(ds...) elasticsearchStrategyPayload(es.Strategy, res.Plan) @@ -155,7 +155,7 @@ func topologyPayload(ctx context.Context, topologyObj types.Object, id string, t diags.Append(ds...) if !ds.HasError() { - diags.Append(topology.Payload(ctx, id, topologies)...) + diags.Append(topology.payload(ctx, id, topologies)...) } } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go index 4356ba1f3..bf3ddc354 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go @@ -89,7 +89,7 @@ func readElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.Rem plan := in.Info.PlanInfo.Current.Plan var err error - topologies, err := ReadElasticsearchTopologies(plan) + topologies, err := readElasticsearchTopologies(plan) if err != nil { return nil, err } @@ -110,7 +110,7 @@ func readElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.Rem return nil, err } - clusters, err := ReadElasticsearchRemoteClusters(remotes.Resources) + clusters, err := readElasticsearchRemoteClusters(remotes.Resources) if err != nil { return nil, err } @@ -122,13 +122,13 @@ func readElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.Rem } es.Extension = extensions - accounts, err := ReadElasticsearchTrustAccounts(in.Info.Settings) + accounts, err := readElasticsearchTrustAccounts(in.Info.Settings) if err != nil { return nil, err } es.TrustAccount = accounts - externals, err := ReadElasticsearchTrustExternals(in.Info.Settings) + 
externals, err := readElasticsearchTrustExternals(in.Info.Settings) if err != nil { return nil, err } @@ -138,7 +138,7 @@ func readElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.Rem } func (es *Elasticsearch) setTopology(topologies ElasticsearchTopologies) { - set := topologies.Set() + set := topologies.AsSet() for id, topology := range set { topology := topology diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go index 031263b2e..5f6b97324 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go @@ -391,7 +391,7 @@ func Test_readElasticsearchTopology(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := ReadElasticsearchTopologies(tt.args.plan) + got, err := readElasticsearchTopologies(tt.args.plan) if err != nil && !assert.EqualError(t, err, tt.err) { t.Error(err) } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_remote_cluster.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_remote_cluster.go index 7bdfe2fe9..a2c9720e7 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_remote_cluster.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_remote_cluster.go @@ -42,7 +42,7 @@ type ElasticsearchRemoteCluster struct { type ElasticsearchRemoteClusters []ElasticsearchRemoteCluster -func ReadElasticsearchRemoteClusters(in []*models.RemoteResourceRef) (ElasticsearchRemoteClusters, error) { +func readElasticsearchRemoteClusters(in []*models.RemoteResourceRef) (ElasticsearchRemoteClusters, error) { if len(in) == 0 { return nil, nil } @@ -50,11 +50,10 @@ func ReadElasticsearchRemoteClusters(in []*models.RemoteResourceRef) (Elasticsea clusters := make(ElasticsearchRemoteClusters, 0, len(in)) 
for _, model := range in { - cluster, err := ReadElasticsearchRemoteCluster(model) + cluster, err := readElasticsearchRemoteCluster(model) if err != nil { return nil, err } - // clusters[*cluster.DeploymentId] = *cluster clusters = append(clusters, *cluster) } @@ -95,7 +94,7 @@ func ElasticsearchRemoteClustersPayload(ctx context.Context, clustersTF types.Se return &payloads, nil } -func ReadElasticsearchRemoteCluster(in *models.RemoteResourceRef) (*ElasticsearchRemoteCluster, error) { +func readElasticsearchRemoteCluster(in *models.RemoteResourceRef) (*ElasticsearchRemoteCluster, error) { var cluster ElasticsearchRemoteCluster if in.DeploymentID != nil && *in.DeploymentID != "" { diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go index 33769ea8f..8db932188 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go @@ -72,7 +72,7 @@ func CreateTierForTest(tierId string, tier ElasticsearchTopology) *Elasticsearch type ElasticsearchTopologyAutoscaling v1.ElasticsearchTopologyAutoscaling -func (topology ElasticsearchTopologyTF) Payload(ctx context.Context, topologyID string, planTopologies []*models.ElasticsearchClusterTopologyElement) diag.Diagnostics { +func (topology ElasticsearchTopologyTF) payload(ctx context.Context, topologyID string, planTopologies []*models.ElasticsearchClusterTopologyElement) diag.Diagnostics { var diags diag.Diagnostics topologyElem, err := matchEsTopologyID(topologyID, planTopologies) @@ -94,7 +94,7 @@ func (topology ElasticsearchTopologyTF) Payload(ctx context.Context, topologyID topologyElem.ZoneCount = int32(topology.ZoneCount.Value) } - if err := topology.ParseLegacyNodeType(topologyElem.NodeType); err != nil { + if err := topology.parseLegacyNodeType(topologyElem.NodeType); err != nil { diags.AddError("topology 
legacy node type error", err.Error()) } @@ -107,14 +107,14 @@ func (topology ElasticsearchTopologyTF) Payload(ctx context.Context, topologyID topologyElem.NodeType = nil } - diags.Append(ElasticsearchTopologyAutoscalingPayload(ctx, topology.Autoscaling, topologyID, topologyElem)...) + diags.Append(elasticsearchTopologyAutoscalingPayload(ctx, topology.Autoscaling, topologyID, topologyElem)...) diags = append(diags, ds...) return diags } -func ReadElasticsearchTopologies(in *models.ElasticsearchClusterPlan) (ElasticsearchTopologies, error) { +func readElasticsearchTopologies(in *models.ElasticsearchClusterPlan) (ElasticsearchTopologies, error) { if len(in.ClusterTopology) == 0 { return nil, nil } @@ -122,11 +122,7 @@ func ReadElasticsearchTopologies(in *models.ElasticsearchClusterPlan) (Elasticse tops := make([]ElasticsearchTopology, 0, len(in.ClusterTopology)) for _, model := range in.ClusterTopology { - // if !v1.IsPotentiallySizedTopology(model, in.AutoscalingEnabled != nil && *in.AutoscalingEnabled) { - // continue - // } - - topology, err := ReadElasticsearchTopology(model) + topology, err := readElasticsearchTopology(model) if err != nil { return nil, err } @@ -136,7 +132,7 @@ func ReadElasticsearchTopologies(in *models.ElasticsearchClusterPlan) (Elasticse return tops, nil } -func ReadElasticsearchTopology(model *models.ElasticsearchClusterTopologyElement) (*ElasticsearchTopology, error) { +func readElasticsearchTopology(model *models.ElasticsearchClusterTopologyElement) (*ElasticsearchTopology, error) { var topology ElasticsearchTopology topology.id = model.ID @@ -172,7 +168,7 @@ func ReadElasticsearchTopology(model *models.ElasticsearchClusterTopologyElement topology.NodeRoles = model.NodeRoles - autoscaling, err := ReadElasticsearchTopologyAutoscaling(model) + autoscaling, err := readElasticsearchTopologyAutoscaling(model) if err != nil { return nil, err } @@ -181,17 +177,17 @@ func ReadElasticsearchTopology(model *models.ElasticsearchClusterTopologyElement 
return &topology, nil } -func ReadElasticsearchTopologyAutoscaling(topology *models.ElasticsearchClusterTopologyElement) (*ElasticsearchTopologyAutoscaling, error) { +func readElasticsearchTopologyAutoscaling(topology *models.ElasticsearchClusterTopologyElement) (*ElasticsearchTopologyAutoscaling, error) { var a ElasticsearchTopologyAutoscaling - if ascale := topology.AutoscalingMax; ascale != nil { - a.MaxSizeResource = ascale.Resource - a.MaxSize = ec.String(util.MemoryToState(*ascale.Value)) + if max := topology.AutoscalingMax; max != nil { + a.MaxSizeResource = max.Resource + a.MaxSize = ec.String(util.MemoryToState(*max.Value)) } - if ascale := topology.AutoscalingMin; ascale != nil { - a.MinSizeResource = ascale.Resource - a.MinSize = ec.String(util.MemoryToState(*ascale.Value)) + if min := topology.AutoscalingMin; min != nil { + a.MinSizeResource = min.Resource + a.MinSize = ec.String(util.MemoryToState(*min.Value)) } if topology.AutoscalingPolicyOverrideJSON != nil { @@ -205,7 +201,7 @@ func ReadElasticsearchTopologyAutoscaling(topology *models.ElasticsearchClusterT return &a, nil } -func (topology *ElasticsearchTopologyTF) ParseLegacyNodeType(nodeType *models.ElasticsearchNodeType) error { +func (topology *ElasticsearchTopologyTF) parseLegacyNodeType(nodeType *models.ElasticsearchNodeType) error { if nodeType == nil { return nil } @@ -268,7 +264,7 @@ func ObjectToTopology(ctx context.Context, obj types.Object) (*ElasticsearchTopo type ElasticsearchTopologies []ElasticsearchTopology -func (tops ElasticsearchTopologies) Set() map[string]ElasticsearchTopology { +func (tops ElasticsearchTopologies) AsSet() map[string]ElasticsearchTopology { set := make(map[string]ElasticsearchTopology, len(tops)) for _, top := range tops { @@ -306,7 +302,7 @@ func topologyIDs(topologies []*models.ElasticsearchClusterTopologyElement) []str return result } -func ElasticsearchTopologyAutoscalingPayload(ctx context.Context, autoObj attr.Value, topologyID string, payload 
*models.ElasticsearchClusterTopologyElement) diag.Diagnostics { +func elasticsearchTopologyAutoscalingPayload(ctx context.Context, autoObj attr.Value, topologyID string, payload *models.ElasticsearchClusterTopologyElement) diag.Diagnostics { var diag diag.Diagnostics if autoObj.IsNull() || autoObj.IsUnknown() { diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_account.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_account.go index 4d06125b8..9f843f5fa 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_account.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_account.go @@ -24,7 +24,7 @@ import ( type ElasticsearchTrustAccounts v1.ElasticsearchTrustAccounts -func ReadElasticsearchTrustAccounts(in *models.ElasticsearchClusterSettings) (ElasticsearchTrustAccounts, error) { +func readElasticsearchTrustAccounts(in *models.ElasticsearchClusterSettings) (ElasticsearchTrustAccounts, error) { if in == nil || in.Trust == nil { return nil, nil } @@ -32,7 +32,7 @@ func ReadElasticsearchTrustAccounts(in *models.ElasticsearchClusterSettings) (El accounts := make(ElasticsearchTrustAccounts, 0, len(in.Trust.Accounts)) for _, model := range in.Trust.Accounts { - account, err := ReadElasticsearchTrustAccount(model) + account, err := readElasticsearchTrustAccount(model) if err != nil { return nil, err } @@ -42,7 +42,7 @@ func ReadElasticsearchTrustAccounts(in *models.ElasticsearchClusterSettings) (El return accounts, nil } -func ReadElasticsearchTrustAccount(in *models.AccountTrustRelationship) (*v1.ElasticsearchTrustAccount, error) { +func readElasticsearchTrustAccount(in *models.AccountTrustRelationship) (*v1.ElasticsearchTrustAccount, error) { var acc v1.ElasticsearchTrustAccount if in.AccountID != nil { diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_external.go 
b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_external.go index a2f3d6691..0aa2e3731 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_external.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_external.go @@ -29,7 +29,7 @@ import ( type ElasticsearchTrustExternals v1.ElasticsearchTrustExternals -func ReadElasticsearchTrustExternals(in *models.ElasticsearchClusterSettings) (ElasticsearchTrustExternals, error) { +func readElasticsearchTrustExternals(in *models.ElasticsearchClusterSettings) (ElasticsearchTrustExternals, error) { if in == nil || in.Trust == nil { return nil, nil } @@ -37,7 +37,7 @@ func ReadElasticsearchTrustExternals(in *models.ElasticsearchClusterSettings) (E externals := make(ElasticsearchTrustExternals, 0, len(in.Trust.External)) for _, model := range in.Trust.External { - external, err := ReadElasticsearchTrustExternal(model) + external, err := readElasticsearchTrustExternal(model) if err != nil { return nil, err } @@ -47,7 +47,7 @@ func ReadElasticsearchTrustExternals(in *models.ElasticsearchClusterSettings) (E return externals, nil } -func ElasticsearchTrustExternalPayload(ctx context.Context, externals types.Set, model *models.ElasticsearchClusterSettings) (*models.ElasticsearchClusterSettings, diag.Diagnostics) { +func elasticsearchTrustExternalPayload(ctx context.Context, externals types.Set, model *models.ElasticsearchClusterSettings) (*models.ElasticsearchClusterSettings, diag.Diagnostics) { var diags diag.Diagnostics payloads := make([]*models.ExternalTrustRelationship, 0, len(externals.Elems)) @@ -99,7 +99,7 @@ func ElasticsearchTrustExternalPayload(ctx context.Context, externals types.Set, return model, nil } -func ReadElasticsearchTrustExternal(in *models.ExternalTrustRelationship) (*v1.ElasticsearchTrustExternal, error) { +func readElasticsearchTrustExternal(in *models.ExternalTrustRelationship) (*v1.ElasticsearchTrustExternal, error) { var ext 
v1.ElasticsearchTrustExternal if in.TrustRelationshipID != nil { @@ -115,7 +115,7 @@ func ReadElasticsearchTrustExternal(in *models.ExternalTrustRelationship) (*v1.E return &ext, nil } -func ElasticsearchTrustAccountPayload(ctx context.Context, accounts types.Set, model *models.ElasticsearchClusterSettings) (*models.ElasticsearchClusterSettings, diag.Diagnostics) { +func elasticsearchTrustAccountPayload(ctx context.Context, accounts types.Set, model *models.ElasticsearchClusterSettings) (*models.ElasticsearchClusterSettings, diag.Diagnostics) { var diags diag.Diagnostics payloads := make([]*models.AccountTrustRelationship, 0, len(accounts.Elems)) From 2d108e50a8370c75efde78e708873b1d4d5ac43b Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 27 Dec 2022 12:07:31 +0100 Subject: [PATCH 054/104] renaming according to PR comments --- ec/ecdatasource/deploymentsdatasource/datasource.go | 4 ++-- ec/ecdatasource/deploymentsdatasource/expanders.go | 2 +- ec/ecdatasource/deploymentsdatasource/schema.go | 4 ++-- ec/ecdatasource/stackdatasource/datasource.go | 4 ++-- ec/ecdatasource/stackdatasource/datasource_test.go | 4 ++-- .../stackdatasource/flatteners_elasticsearch.go | 4 ++-- .../stackdatasource/flatteners_elasticsearch_test.go | 6 +++--- ec/ecdatasource/stackdatasource/schema.go | 12 ++++++------ 8 files changed, 20 insertions(+), 20 deletions(-) diff --git a/ec/ecdatasource/deploymentsdatasource/datasource.go b/ec/ecdatasource/deploymentsdatasource/datasource.go index f0252df40..18700210b 100644 --- a/ec/ecdatasource/deploymentsdatasource/datasource.go +++ b/ec/ecdatasource/deploymentsdatasource/datasource.go @@ -122,8 +122,8 @@ func modelToState(ctx context.Context, res *models.DeploymentsSearchResponse, st } if len(deployment.Resources.Elasticsearch) > 0 { - m.ElasticSearchResourceID = types.String{Value: *deployment.Resources.Elasticsearch[0].ID} - m.ElasticSearchRefID = types.String{Value: *deployment.Resources.Elasticsearch[0].RefID} + 
m.ElasticsearchResourceID = types.String{Value: *deployment.Resources.Elasticsearch[0].ID} + m.ElasticsearchRefID = types.String{Value: *deployment.Resources.Elasticsearch[0].RefID} } if len(deployment.Resources.Kibana) > 0 { diff --git a/ec/ecdatasource/deploymentsdatasource/expanders.go b/ec/ecdatasource/deploymentsdatasource/expanders.go index 7f21c46e7..4973772e8 100644 --- a/ec/ecdatasource/deploymentsdatasource/expanders.go +++ b/ec/ecdatasource/deploymentsdatasource/expanders.go @@ -57,7 +57,7 @@ func expandFilters(ctx context.Context, state modelV0) (*models.SearchRequest, d if healthy != "" { if healthy != "true" && healthy != "false" { diags.AddError("invalid value for healthy", - fmt.Sprintf("invalid value for healthy (true|false): '%s'", healthy)) + fmt.Sprintf("expected either [true] or [false] but got [%s]", healthy)) return nil, diags } diff --git a/ec/ecdatasource/deploymentsdatasource/schema.go b/ec/ecdatasource/deploymentsdatasource/schema.go index 75611be84..ea7123f04 100644 --- a/ec/ecdatasource/deploymentsdatasource/schema.go +++ b/ec/ecdatasource/deploymentsdatasource/schema.go @@ -238,8 +238,8 @@ type deploymentModelV0 struct { DeploymentID types.String `tfsdk:"deployment_id"` Name types.String `tfsdk:"name"` Alias types.String `tfsdk:"alias"` - ElasticSearchResourceID types.String `tfsdk:"elasticsearch_resource_id"` - ElasticSearchRefID types.String `tfsdk:"elasticsearch_ref_id"` + ElasticsearchResourceID types.String `tfsdk:"elasticsearch_resource_id"` + ElasticsearchRefID types.String `tfsdk:"elasticsearch_ref_id"` KibanaResourceID types.String `tfsdk:"kibana_resource_id"` KibanaRefID types.String `tfsdk:"kibana_ref_id"` ApmResourceID types.String `tfsdk:"apm_resource_id"` diff --git a/ec/ecdatasource/stackdatasource/datasource.go b/ec/ecdatasource/stackdatasource/datasource.go index 6acf139a1..b2a7f0982 100644 --- a/ec/ecdatasource/stackdatasource/datasource.go +++ b/ec/ecdatasource/stackdatasource/datasource.go @@ -153,8 +153,8 @@ func 
stackFromFilters(expr, version string, locked bool, stacks []*models.StackV ) } -func newElasticsearchConfigModelV0() elasticSearchConfigModelV0 { - return elasticSearchConfigModelV0{ +func newElasticsearchConfigModelV0() elasticsearchConfigModelV0 { + return elasticsearchConfigModelV0{ DenyList: types.List{ElemType: types.StringType}, CompatibleNodeTypes: types.List{ElemType: types.StringType}, Plugins: types.List{ElemType: types.StringType}, diff --git a/ec/ecdatasource/stackdatasource/datasource_test.go b/ec/ecdatasource/stackdatasource/datasource_test.go index 44e885ce9..0d18680f0 100644 --- a/ec/ecdatasource/stackdatasource/datasource_test.go +++ b/ec/ecdatasource/stackdatasource/datasource_test.go @@ -135,10 +135,10 @@ func newSampleStack() modelV0 { MinUpgradableFrom: types.String{Value: "6.8.0"}, Elasticsearch: types.List{ ElemType: types.ObjectType{ - AttrTypes: elasticSearchConfigAttrTypes(), + AttrTypes: elasticsearchConfigAttrTypes(), }, Elems: []attr.Value{types.Object{ - AttrTypes: elasticSearchConfigAttrTypes(), + AttrTypes: elasticsearchConfigAttrTypes(), Attrs: map[string]attr.Value{ "denylist": util.StringListAsType([]string{"some"}), "capacity_constraints_max": types.Int64{Value: 8192}, diff --git a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go index 2c3105d28..e003a4e40 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go +++ b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go @@ -72,9 +72,9 @@ func flattenStackVersionElasticsearchConfig(ctx context.Context, res *models.Sta return diags } - diags.Append(tfsdk.ValueFrom(ctx, []elasticSearchConfigModelV0{model}, types.ListType{ + diags.Append(tfsdk.ValueFrom(ctx, []elasticsearchConfigModelV0{model}, types.ListType{ ElemType: types.ObjectType{ - AttrTypes: elasticSearchConfigAttrTypes(), + AttrTypes: elasticsearchConfigAttrTypes(), }, }, target)...) 
diff --git a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go index f4bd77d19..80de85305 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go @@ -37,7 +37,7 @@ func Test_flattenElasticsearchResource(t *testing.T) { tests := []struct { name string args args - want []elasticSearchConfigModelV0 + want []elasticsearchConfigModelV0 }{ { name: "empty resource list returns empty list", @@ -75,7 +75,7 @@ func Test_flattenElasticsearchResource(t *testing.T) { "repository-gcs", }, }}, - want: []elasticSearchConfigModelV0{{ + want: []elasticsearchConfigModelV0{{ DenyList: util.StringListAsType([]string{"some"}), CapacityConstraintsMax: types.Int64{Value: 8192}, CapacityConstraintsMin: types.Int64{Value: 512}, @@ -106,7 +106,7 @@ func Test_flattenElasticsearchResource(t *testing.T) { diags := flattenStackVersionElasticsearchConfig(context.Background(), tt.args.res, &newState.Elasticsearch) assert.Empty(t, diags) - var got []elasticSearchConfigModelV0 + var got []elasticsearchConfigModelV0 newState.Elasticsearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) }) diff --git a/ec/ecdatasource/stackdatasource/schema.go b/ec/ecdatasource/stackdatasource/schema.go index 58f98d279..9a9ea3b3b 100644 --- a/ec/ecdatasource/stackdatasource/schema.go +++ b/ec/ecdatasource/stackdatasource/schema.go @@ -80,13 +80,13 @@ func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnost }, "apm": resourceKindConfigSchema(Apm), "enterprise_search": resourceKindConfigSchema(EnterpriseSearch), - "elasticsearch": elasticSearchConfigSchema(), + "elasticsearch": elasticsearchConfigSchema(), "kibana": resourceKindConfigSchema(Kibana), }, }, nil } -func elasticSearchConfigSchema() tfsdk.Attribute { +func elasticsearchConfigSchema() tfsdk.Attribute { return tfsdk.Attribute{ 
Description: "Information for Elasticsearch workloads on this stack version.", Computed: true, @@ -134,8 +134,8 @@ func elasticSearchConfigSchema() tfsdk.Attribute { } } -func elasticSearchConfigAttrTypes() map[string]attr.Type { - return elasticSearchConfigSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +func elasticsearchConfigAttrTypes() map[string]attr.Type { + return elasticsearchConfigSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes } func resourceKindConfigSchema(resourceKind ResourceKind) tfsdk.Attribute { @@ -197,11 +197,11 @@ type modelV0 struct { AllowListed types.Bool `tfsdk:"allowlisted"` Apm types.List `tfsdk:"apm"` //< resourceKindConfigModelV0 EnterpriseSearch types.List `tfsdk:"enterprise_search"` //< resourceKindConfigModelV0 - Elasticsearch types.List `tfsdk:"elasticsearch"` //< elasticSearchConfigModelV0 + Elasticsearch types.List `tfsdk:"elasticsearch"` //< elasticsearchConfigModelV0 Kibana types.List `tfsdk:"kibana"` //< resourceKindConfigModelV0 } -type elasticSearchConfigModelV0 struct { +type elasticsearchConfigModelV0 struct { DenyList types.List `tfsdk:"denylist"` CapacityConstraintsMax types.Int64 `tfsdk:"capacity_constraints_max"` CapacityConstraintsMin types.Int64 `tfsdk:"capacity_constraints_min"` From b460efbd9b89fd255febc2b0bc4e0d86579932d3 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 27 Dec 2022 12:17:33 +0100 Subject: [PATCH 055/104] renaming + incorrect comment removal --- .../elasticsearch/v2/node_roles_plan_modifier.go | 1 - .../deploymentresource/utils/node_types_to_node_roles.go | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go index b66a0cbf4..37940a66b 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go +++ 
b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go @@ -26,7 +26,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) -// Use `self` as value of `observability`'s `deployment_id` attribute func UseNodeRolesDefault() tfsdk.AttributePlanModifier { return nodeRolesDefault{} } diff --git a/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go b/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go index 88e50c839..b2bb7d2d7 100644 --- a/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go +++ b/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go @@ -68,12 +68,12 @@ func legacyToNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagn } var diags diag.Diagnostics - oldV, err := semver.Parse(stateVersion.Value) + oldVersion, err := semver.Parse(stateVersion.Value) if err != nil { diags.AddError("failed to parse previous Elasticsearch version", err.Error()) return false, diags } - newV, err := semver.Parse(planVersion.Value) + newVersion, err := semver.Parse(planVersion.Value) if err != nil { diags.AddError("failed to parse new Elasticsearch version", err.Error()) return false, diags @@ -81,7 +81,7 @@ func legacyToNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagn // if the version change moves from non-node_roles to one // that supports node roles, do not migrate on that step. 
- if oldV.LT(DataTiersVersion) && newV.GE(DataTiersVersion) { + if oldVersion.LT(DataTiersVersion) && newVersion.GE(DataTiersVersion) { return false, nil } From b1862d4154276996f79492f4c7cb44a8a71f13c6 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 27 Dec 2022 12:23:17 +0100 Subject: [PATCH 056/104] fix unit test --- ec/ecdatasource/deploymentsdatasource/expanders_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ec/ecdatasource/deploymentsdatasource/expanders_test.go b/ec/ecdatasource/deploymentsdatasource/expanders_test.go index 8e1219ab5..dc01f56e6 100644 --- a/ec/ecdatasource/deploymentsdatasource/expanders_test.go +++ b/ec/ecdatasource/deploymentsdatasource/expanders_test.go @@ -136,7 +136,7 @@ func Test_expandFilters(t *testing.T) { { name: "fails to parse the data source", args: args{state: newInvalidFilters()}, - diags: diag.Diagnostics{diag.NewErrorDiagnostic("invalid value for healthy", "invalid value for healthy (true|false): 'invalid value'")}, + diags: diag.Diagnostics{diag.NewErrorDiagnostic("invalid value for healthy", "expected either [true] or [false] but got [invalid value]")}, }, } for _, tt := range tests { From 8c938b137cc52bafe6e32538c862a4a5f6392b05 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 27 Dec 2022 12:47:41 +0100 Subject: [PATCH 057/104] convert 'elasticsearch' 'autoscale' attribute to bool type --- .../v2/deployment_create_payload_test.go | 8 ++++---- .../deployment/v2/deployment_read_test.go | 18 +++++++++--------- .../v2/deployment_update_payload_test.go | 4 ++-- .../elasticsearch/v2/elasticsearch_payload.go | 12 +++--------- .../v2/elasticsearch_payload_test.go | 8 ++++---- .../elasticsearch/v2/elasticsearch_read.go | 7 ++----- .../elasticsearch/v2/schema.go | 2 +- 7 files changed, 25 insertions(+), 34 deletions(-) diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go 
b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go index 2f3f16aea..16c8bf342 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go @@ -1603,7 +1603,7 @@ func Test_createRequest(t *testing.T) { Version: "7.12.0", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - Autoscale: ec.String("true"), + Autoscale: ec.Bool(true), HotTier: elasticsearchv2.CreateTierForTest( "hot_content", elasticsearchv2.ElasticsearchTopology{ @@ -1755,7 +1755,7 @@ func Test_createRequest(t *testing.T) { Version: "7.12.0", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - Autoscale: ec.String("true"), + Autoscale: ec.Bool(true), HotTier: elasticsearchv2.CreateTierForTest( "hot_content", elasticsearchv2.ElasticsearchTopology{ @@ -2475,7 +2475,7 @@ func Test_createRequest(t *testing.T) { Config: &elasticsearchv2.ElasticsearchConfig{ DockerImage: ec.String("docker.elastic.com/elasticsearch/container:7.14.1-hash"), }, - Autoscale: ec.String("false"), + Autoscale: ec.Bool(false), TrustAccount: elasticsearchv2.ElasticsearchTrustAccounts{ { AccountId: ec.String("ANID"), @@ -2655,7 +2655,7 @@ func Test_createRequest(t *testing.T) { Version: "7.12.0", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - Autoscale: ec.String("false"), + Autoscale: ec.Bool(false), TrustAccount: elasticsearchv2.ElasticsearchTrustAccounts{ { AccountId: ec.String("ANID"), diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go index bb32bb69c..390a3f629 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go @@ -479,7 +479,7 @@ func Test_readDeployment(t 
*testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String("1238f19957874af69306787dca662154"), Region: ec.String("azure-eastus2"), - Autoscale: ec.String("false"), + Autoscale: ec.Bool(false), CloudID: ec.String("up2d:somecloudID"), HttpEndpoint: ec.String("http://1238f19957874af69306787dca662154.eastus2.azure.elastic-cloud.com:9200"), HttpsEndpoint: ec.String("https://1238f19957874af69306787dca662154.eastus2.azure.elastic-cloud.com:9243"), @@ -540,7 +540,7 @@ func Test_readDeployment(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String("1239f7ee7196439ba2d105319ac5eba7"), Region: ec.String("aws-eu-central-1"), - Autoscale: ec.String("false"), + Autoscale: ec.Bool(false), CloudID: ec.String("up2d:someCloudID"), HttpEndpoint: ec.String("http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200"), HttpsEndpoint: ec.String("https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243"), @@ -603,7 +603,7 @@ func Test_readDeployment(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String("1239f7ee7196439ba2d105319ac5eba7"), Region: ec.String("aws-eu-central-1"), - Autoscale: ec.String("false"), + Autoscale: ec.Bool(false), CloudID: ec.String("up2d:someCloudID"), HttpEndpoint: ec.String("http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200"), HttpsEndpoint: ec.String("https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243"), @@ -1060,7 +1060,7 @@ func Test_readDeployment(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String("1239f7ee7196439ba2d105319ac5eba7"), Region: ec.String("aws-eu-central-1"), - Autoscale: ec.String("false"), + Autoscale: ec.Bool(false), CloudID: ec.String("up2d:someCloudID"), HttpEndpoint: ec.String("http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200"), HttpsEndpoint: ec.String("https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243"), @@ -1121,7 
+1121,7 @@ func Test_readDeployment(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String("123695e76d914005bf90b717e668ad4b"), Region: ec.String("gcp-asia-east1"), - Autoscale: ec.String("false"), + Autoscale: ec.Bool(false), CloudID: ec.String("up2d:someCloudID"), HttpEndpoint: ec.String("http://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9200"), HttpsEndpoint: ec.String("https://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9243"), @@ -1182,7 +1182,7 @@ func Test_readDeployment(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String("123695e76d914005bf90b717e668ad4b"), Region: ec.String("gcp-asia-east1"), - Autoscale: ec.String("true"), + Autoscale: ec.Bool(true), CloudID: ec.String("up2d:someCloudID"), HttpEndpoint: ec.String("http://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9200"), HttpsEndpoint: ec.String("https://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9243"), @@ -1293,7 +1293,7 @@ func Test_readDeployment(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String("123e837db6ee4391bb74887be35a7a91"), Region: ec.String("gcp-us-central1"), - Autoscale: ec.String("false"), + Autoscale: ec.Bool(false), CloudID: ec.String("up2d-hot-warm:someCloudID"), HttpEndpoint: ec.String("http://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9200"), HttpsEndpoint: ec.String("https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243"), @@ -1381,7 +1381,7 @@ func Test_readDeployment(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String("123e837db6ee4391bb74887be35a7a91"), Region: ec.String("gcp-us-central1"), - Autoscale: ec.String("false"), + Autoscale: ec.Bool(false), CloudID: ec.String("up2d-hot-warm:someCloudID"), HttpEndpoint: ec.String("http://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9200"), HttpsEndpoint: 
ec.String("https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243"), @@ -1506,7 +1506,7 @@ func Test_readDeployment(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String("1230b3ae633b4f51a432d50971f7f1c1"), Region: ec.String("eu-west-1"), - Autoscale: ec.String("false"), + Autoscale: ec.Bool(false), CloudID: ec.String("ccs:someCloudID"), HttpEndpoint: ec.String("http://1230b3ae633b4f51a432d50971f7f1c1.eu-west-1.aws.found.io:9200"), HttpsEndpoint: ec.String("https://1230b3ae633b4f51a432d50971f7f1c1.eu-west-1.aws.found.io:9243"), diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go index d062da24f..5846390e2 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go @@ -1557,7 +1557,7 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.12.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - Autoscale: ec.String("true"), + Autoscale: ec.Bool(true), HotTier: elasticsearchv2.CreateTierForTest( "hot_content", elasticsearchv2.ElasticsearchTopology{ @@ -1582,7 +1582,7 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.12.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - Autoscale: ec.String("true"), + Autoscale: ec.Bool(true), HotTier: elasticsearchv2.CreateTierForTest( "hot_content", elasticsearchv2.ElasticsearchTopology{ diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go index 73bf45865..90bc5f266 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go @@ -19,7 
+19,6 @@ package v2 import ( "context" - "strconv" "strings" "github.com/elastic/cloud-sdk-go/pkg/models" @@ -31,7 +30,7 @@ import ( ) type ElasticsearchTF struct { - Autoscale types.String `tfsdk:"autoscale"` + Autoscale types.Bool `tfsdk:"autoscale"` RefId types.String `tfsdk:"ref_id"` ResourceId types.String `tfsdk:"resource_id"` Region types.String `tfsdk:"region"` @@ -111,13 +110,8 @@ func (es *ElasticsearchTF) payload(ctx context.Context, res *models.Elasticsearc diags.Append(elasticsearchExtensionPayload(ctx, es.Extension, res.Plan.Elasticsearch)...) - if es.Autoscale.Value != "" { - autoscaleBool, err := strconv.ParseBool(es.Autoscale.Value) - if err != nil { - diags.AddError("failed parsing autoscale value", err.Error()) - } else { - res.Plan.AutoscalingEnabled = &autoscaleBool - } + if !es.Autoscale.IsNull() && !es.Autoscale.IsUnknown() { + res.Plan.AutoscalingEnabled = &es.Autoscale.Value } res.Settings, ds = elasticsearchTrustAccountPayload(ctx, es.TrustAccount, res.Settings) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go index 88971b57a..4895719b2 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go @@ -911,7 +911,7 @@ func Test_writeElasticsearch(t *testing.T) { name: "autoscaling enabled", args: args{ es: Elasticsearch{ - Autoscale: ec.String("true"), + Autoscale: ec.Bool(true), RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), @@ -1044,7 +1044,7 @@ func Test_writeElasticsearch(t *testing.T) { name: "autoscaling enabled overriding the size with ml", args: args{ es: Elasticsearch{ - Autoscale: ec.String("true"), + Autoscale: ec.Bool(true), RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: 
ec.String("some-region"), @@ -1221,7 +1221,7 @@ func Test_writeElasticsearch(t *testing.T) { name: "autoscaling enabled no dimension in template, default resource", args: args{ es: Elasticsearch{ - Autoscale: ec.String("true"), + Autoscale: ec.Bool(true), RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), @@ -1333,7 +1333,7 @@ func Test_writeElasticsearch(t *testing.T) { name: "autoscaling enabled overriding the size and resources", args: args{ es: Elasticsearch{ - Autoscale: ec.String("true"), + Autoscale: ec.Bool(true), RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go index bf3ddc354..6924e7c51 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go @@ -18,10 +18,7 @@ package v2 import ( - "strconv" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" "github.com/elastic/terraform-provider-ec/ec/internal/converters" @@ -29,7 +26,7 @@ import ( ) type Elasticsearch struct { - Autoscale *string `tfsdk:"autoscale"` + Autoscale *bool `tfsdk:"autoscale"` RefId *string `tfsdk:"ref_id"` ResourceId *string `tfsdk:"resource_id"` Region *string `tfsdk:"region"` @@ -96,7 +93,7 @@ func readElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.Rem es.setTopology(topologies) if plan.AutoscalingEnabled != nil { - es.Autoscale = ec.String(strconv.FormatBool(*plan.AutoscalingEnabled)) + es.Autoscale = plan.AutoscalingEnabled } if meta := in.Info.Metadata; meta != nil && meta.CloudID != "" { diff --git 
a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go index ef67b4995..ec2080c5e 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -50,7 +50,7 @@ func ElasticsearchSchema() tfsdk.Attribute { Required: true, Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ "autoscale": { - Type: types.StringType, + Type: types.BoolType, Description: `Enable or disable autoscaling. Defaults to the setting coming from the deployment template. Accepted values are "true" or "false".`, Computed: true, Optional: true, From 395a3bcdeb899a40abb6108e5de632ff4b12c9c4 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 27 Dec 2022 15:25:23 +0100 Subject: [PATCH 058/104] remove TODOs for DiffSupressFunc --- ec/ecresource/deploymentresource/apm/v2/schema.go | 2 -- ec/ecresource/deploymentresource/elasticsearch/v2/schema.go | 4 +--- .../deploymentresource/enterprisesearch/v2/schema.go | 2 -- .../deploymentresource/integrationsserver/v2/schema.go | 3 --- ec/ecresource/deploymentresource/kibana/v2/schema.go | 4 +--- 5 files changed, 2 insertions(+), 13 deletions(-) diff --git a/ec/ecresource/deploymentresource/apm/v2/schema.go b/ec/ecresource/deploymentresource/apm/v2/schema.go index b52e2dbc5..2cd1f9ee2 100644 --- a/ec/ecresource/deploymentresource/apm/v2/schema.go +++ b/ec/ecresource/deploymentresource/apm/v2/schema.go @@ -29,8 +29,6 @@ func ApmConfigSchema() tfsdk.Attribute { Description: `Optionally define the Apm configuration options for the APM Server`, Optional: true, Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ - // TODO - // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, "docker_image": { Type: types.StringType, Description: "Optionally override the docker image the APM nodes will use. 
This option will not work in ESS customers and should only be changed if you know what you're doing.", diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go index ec2080c5e..d089d5137 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -128,11 +128,9 @@ func ElasticsearchSchema() tfsdk.Attribute { func ElasticsearchConfigSchema() tfsdk.Attribute { return tfsdk.Attribute{ - Description: `Optional Elasticsearch settings which will be applied to all topologies unless overridden on the topology element`, + Description: `Optional Elasticsearch settings which will be applied to all topologies`, Optional: true, Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ - // TODO - // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, "docker_image": { Type: types.StringType, Description: "Optionally override the docker image the Elasticsearch nodes will use. Note that this field will only work for internal users only.", diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go index ebbbe94ab..a3a0b6a06 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go @@ -131,8 +131,6 @@ func EnterpriseSearchSchema() tfsdk.Attribute { Description: `Optionally define the Enterprise Search configuration options for the Enterprise Search Server`, Optional: true, Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ - // TODO - // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, "docker_image": { Type: types.StringType, Description: "Optionally override the docker image the Enterprise Search nodes will use. 
Note that this field will only work for internal users only.", diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go b/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go index 9f12453ea..92a3c0c72 100644 --- a/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go @@ -110,14 +110,11 @@ func IntegrationsServerSchema() tfsdk.Attribute { Description: `Optionally define the IntegrationsServer configuration options for the IntegrationsServer Server`, Optional: true, Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ - // TODO - // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, "docker_image": { Type: types.StringType, Description: "Optionally override the docker image the IntegrationsServer nodes will use. Note that this field will only work for internal users only.", Optional: true, }, - // IntegrationsServer System Settings "debug_enabled": { Type: types.BoolType, Description: `Optionally enable debug mode for IntegrationsServer servers - defaults to false`, diff --git a/ec/ecresource/deploymentresource/kibana/v2/schema.go b/ec/ecresource/deploymentresource/kibana/v2/schema.go index 3740df7ef..f7380236c 100644 --- a/ec/ecresource/deploymentresource/kibana/v2/schema.go +++ b/ec/ecresource/deploymentresource/kibana/v2/schema.go @@ -107,9 +107,7 @@ func KibanaSchema() tfsdk.Attribute { }, }, "config": { - Optional: true, - // TODO - // DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + Optional: true, Description: `Optionally define the Kibana configuration options for the Kibana Server`, Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ "docker_image": { From 894bdb58c5dcbc15c7b2afd1a6ef272582b12933 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 27 Dec 2022 18:44:23 +0100 Subject: [PATCH 059/104] unit test for handling traffic rules --- 
ec/ecresource/deploymentresource/update.go | 18 +- .../deploymentresource/update_test.go | 214 ++++++++++++++++++ 2 files changed, 226 insertions(+), 6 deletions(-) create mode 100644 ec/ecresource/deploymentresource/update_test.go diff --git a/ec/ecresource/deploymentresource/update.go b/ec/ecresource/deploymentresource/update.go index 29dfb2715..8c6498fe9 100644 --- a/ec/ecresource/deploymentresource/update.go +++ b/ec/ecresource/deploymentresource/update.go @@ -72,7 +72,7 @@ func (r *Resource) Update(ctx context.Context, req resource.UpdateRequest, resp return } - resp.Diagnostics.Append(handleTrafficFilterChange(ctx, r.client, plan, state)...) + resp.Diagnostics.Append(HandleTrafficFilterChange(ctx, r.client, plan, state)...) resp.Diagnostics.Append(v2.HandleRemoteClusters(ctx, r.client, plan.Id.Value, plan.Elasticsearch)...) @@ -89,7 +89,7 @@ func (r *Resource) Update(ctx context.Context, req resource.UpdateRequest, resp resp.Diagnostics.Append(resp.State.Set(ctx, deployment)...) } -func handleTrafficFilterChange(ctx context.Context, client *api.API, plan, state v2.DeploymentTF) diag.Diagnostics { +func HandleTrafficFilterChange(ctx context.Context, client *api.API, plan, state v2.DeploymentTF) diag.Diagnostics { if plan.TrafficFilter.IsNull() || plan.TrafficFilter.Equal(state.TrafficFilter) { return nil } @@ -144,8 +144,14 @@ func (rs ruleSet) exist(rule string) bool { return false } +var ( + GetAssociation = trafficfilterapi.Get + CreateAssociation = trafficfilterapi.CreateAssociation + DeleteAssociation = trafficfilterapi.DeleteAssociation +) + func associateRule(ruleID, deploymentID string, client *api.API) error { - res, err := trafficfilterapi.Get(trafficfilterapi.GetParams{ + res, err := GetAssociation(trafficfilterapi.GetParams{ API: client, ID: ruleID, IncludeAssociations: true, }) if err != nil { @@ -160,7 +166,7 @@ func associateRule(ruleID, deploymentID string, client *api.API) error { } // Create assignment. 
- if err := trafficfilterapi.CreateAssociation(trafficfilterapi.CreateAssociationParams{ + if err := CreateAssociation(trafficfilterapi.CreateAssociationParams{ API: client, ID: ruleID, EntityType: "deployment", EntityID: deploymentID, }); err != nil { return err @@ -169,7 +175,7 @@ func associateRule(ruleID, deploymentID string, client *api.API) error { } func removeRule(ruleID, deploymentID string, client *api.API) error { - res, err := trafficfilterapi.Get(trafficfilterapi.GetParams{ + res, err := GetAssociation(trafficfilterapi.GetParams{ API: client, ID: ruleID, IncludeAssociations: true, }) @@ -184,7 +190,7 @@ func removeRule(ruleID, deploymentID string, client *api.API) error { // If the rule is found, then delete the association. for _, assoc := range res.Associations { if deploymentID == *assoc.ID { - return trafficfilterapi.DeleteAssociation(trafficfilterapi.DeleteAssociationParams{ + return DeleteAssociation(trafficfilterapi.DeleteAssociationParams{ API: client, ID: ruleID, EntityID: *assoc.ID, diff --git a/ec/ecresource/deploymentresource/update_test.go b/ec/ecresource/deploymentresource/update_test.go new file mode 100644 index 000000000..a1e294c8b --- /dev/null +++ b/ec/ecresource/deploymentresource/update_test.go @@ -0,0 +1,214 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package deploymentresource_test + +import ( + "context" + "fmt" + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource" + v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/stretchr/testify/assert" +) + +func Test_handleTrafficFilterChange(t *testing.T) { + deploymentID := "deployment_unique_id" + + type args struct { + plan []string + state []string + } + + tests := []struct { + name string + args args + getRule func(trafficfilterapi.GetParams) (*models.TrafficFilterRulesetInfo, error) + createRule func(params trafficfilterapi.CreateAssociationParams) error + deleteRule func(params trafficfilterapi.DeleteAssociationParams) error + }{ + { + name: "should not call the association API when plan and state contain same rules", + args: args{ + plan: []string{"rule1"}, + state: []string{"rule1"}, + }, + getRule: func(trafficfilterapi.GetParams) (*models.TrafficFilterRulesetInfo, error) { + err := "GetRule function SHOULD NOT be called" + t.Errorf(err) + return nil, fmt.Errorf(err) + }, + createRule: func(params trafficfilterapi.CreateAssociationParams) error { + err := "CreateRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, + deleteRule: func(params trafficfilterapi.DeleteAssociationParams) error { + err := "DeleteRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, + }, + + { + name: "should add rule when plan contains it and state doesn't contain it", + args: args{ + plan: []string{"rule1", "rule2"}, + state: []string{"rule1"}, + }, + getRule: func(trafficfilterapi.GetParams) 
(*models.TrafficFilterRulesetInfo, error) { + return &models.TrafficFilterRulesetInfo{}, nil + }, + createRule: func(params trafficfilterapi.CreateAssociationParams) error { + assert.Equal(t, "rule2", params.ID) + return nil + }, + deleteRule: func(params trafficfilterapi.DeleteAssociationParams) error { + err := "DeleteRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, + }, + + { + name: "should not add rule when plan contains it and state doesn't contain it but the association already exists", + args: args{ + plan: []string{"rule1", "rule2"}, + state: []string{"rule1"}, + }, + getRule: func(trafficfilterapi.GetParams) (*models.TrafficFilterRulesetInfo, error) { + return &models.TrafficFilterRulesetInfo{ + Associations: []*models.FilterAssociation{ + { + ID: &deploymentID, + EntityType: ec.String("deployment"), + }, + }, + }, nil + }, + createRule: func(params trafficfilterapi.CreateAssociationParams) error { + err := "CreateRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, + deleteRule: func(params trafficfilterapi.DeleteAssociationParams) error { + err := "DeleteRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, + }, + + { + name: "should delete rule when plan doesn't contain it and state does contain it", + args: args{ + plan: []string{"rule1"}, + state: []string{"rule1", "rule2"}, + }, + getRule: func(trafficfilterapi.GetParams) (*models.TrafficFilterRulesetInfo, error) { + return &models.TrafficFilterRulesetInfo{ + Associations: []*models.FilterAssociation{ + { + ID: &deploymentID, + EntityType: ec.String("deployment"), + }, + }, + }, nil + }, + createRule: func(params trafficfilterapi.CreateAssociationParams) error { + err := "CreateRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, + deleteRule: func(params trafficfilterapi.DeleteAssociationParams) error { + assert.Equal(t, "rule2", params.ID) + return nil + }, + }, + + { + name: 
"should not delete rule when plan doesn't contain it and state does contain it but the association is already gone", + args: args{ + plan: []string{"rule1"}, + state: []string{"rule1", "rule2"}, + }, + getRule: func(trafficfilterapi.GetParams) (*models.TrafficFilterRulesetInfo, error) { + return &models.TrafficFilterRulesetInfo{}, nil + }, + createRule: func(params trafficfilterapi.CreateAssociationParams) error { + err := "CreateRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, + deleteRule: func(params trafficfilterapi.DeleteAssociationParams) error { + err := "DeleteRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + getRule := deploymentresource.GetAssociation + createRule := deploymentresource.CreateAssociation + deleteRule := deploymentresource.DeleteAssociation + + defer func() { + deploymentresource.GetAssociation = getRule + deploymentresource.CreateAssociation = createRule + deploymentresource.DeleteAssociation = deleteRule + }() + + deploymentresource.GetAssociation = tt.getRule + deploymentresource.CreateAssociation = tt.createRule + deploymentresource.DeleteAssociation = tt.deleteRule + + plan := v2.Deployment{ + Id: deploymentID, + TrafficFilter: tt.args.plan, + } + + state := v2.Deployment{ + Id: deploymentID, + TrafficFilter: tt.args.state, + } + + var planTF v2.DeploymentTF + + diags := tfsdk.ValueFrom(context.Background(), &plan, v2.DeploymentSchema().Type(), &planTF) + + assert.Nil(t, diags) + + var stateTF v2.DeploymentTF + + diags = tfsdk.ValueFrom(context.Background(), &state, v2.DeploymentSchema().Type(), &stateTF) + + assert.Nil(t, diags) + + diags = deploymentresource.HandleTrafficFilterChange(context.Background(), nil, planTF, stateTF) + + assert.Nil(t, diags) + }) + + } + +} From 6a92321a8ddf81faef3abb79cd86f2e6235c95bc Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 28 Dec 2022 
12:33:15 +0100 Subject: [PATCH 060/104] move files from utils to dedicated resource packages --- .../deploymentresource/apm/v2/apm_read.go | 9 +- .../apm/v2/apm_read_test.go | 32 ++++ .../v2/deployment_create_payload.go | 3 +- .../v2/deployment_create_payload_test.go | 39 ++-- .../deployment/v2/deployment_read.go | 125 ++++++++++++- .../deployment/v2/deployment_read_test.go | 126 ++++++++++++- .../deployment/v2/deployment_test_utils.go | 60 ++++++ .../v2/deployment_update_payload.go | 3 +- .../v2/deployment_update_payload_test.go | 29 ++- .../elasticsearch/v2/elasticsearch_payload.go | 103 ++++++++++- .../v2/elasticsearch_payload_test.go | 63 ++++--- .../elasticsearch/v2/elasticsearch_read.go | 11 +- .../v2/elasticsearch_read_test.go | 32 ++++ .../v2/elasticsearch_test_utils.go | 46 +++++ .../v2/elasticsearch_topology.go | 6 - .../v2/node_roles_plan_modifier.go | 3 +- .../v2/node_types_plan_modifier.go | 3 +- .../v2/enterprise_search_read.go | 11 +- .../v2/enterprise_search_read_test.go | 32 ++++ .../v2/integrations_server_read.go | 9 +- .../v2/integrations_server_read_test.go | 32 ++++ .../kibana/v2/kibana_read.go | 9 +- .../kibana/v2/kibana_read_test.go | 32 ++++ ec/ecresource/deploymentresource/read.go | 38 +++- .../{utils/getters_test.go => read_test.go} | 113 +----------- .../testutil/testutil_func.go | 53 ------ .../deploymentresource/utils/definitions.go | 6 + .../utils/enrich_elasticsearch_template.go | 59 ------ .../deploymentresource/utils/getters.go | 174 ------------------ .../utils/node_types_to_node_roles.go | 89 --------- .../utils/stopped_resource.go | 50 ----- .../utils/stopped_resource_test.go | 155 ---------------- 32 files changed, 754 insertions(+), 801 deletions(-) create mode 100644 ec/ecresource/deploymentresource/deployment/v2/deployment_test_utils.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_test_utils.go rename ec/ecresource/deploymentresource/{utils/getters_test.go => read_test.go} (51%) delete 
mode 100644 ec/ecresource/deploymentresource/utils/enrich_elasticsearch_template.go delete mode 100644 ec/ecresource/deploymentresource/utils/getters.go delete mode 100644 ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go delete mode 100644 ec/ecresource/deploymentresource/utils/stopped_resource.go delete mode 100644 ec/ecresource/deploymentresource/utils/stopped_resource_test.go diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_read.go b/ec/ecresource/deploymentresource/apm/v2/apm_read.go index 42c3a550c..23977e43f 100644 --- a/ec/ecresource/deploymentresource/apm/v2/apm_read.go +++ b/ec/ecresource/deploymentresource/apm/v2/apm_read.go @@ -19,7 +19,6 @@ package v2 import ( "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) @@ -40,7 +39,7 @@ type Apm struct { func ReadApms(in []*models.ApmResourceInfo) (*Apm, error) { for _, model := range in { - if util.IsCurrentApmPlanEmpty(model) || utils.IsApmResourceStopped(model) { + if util.IsCurrentApmPlanEmpty(model) || IsApmStopped(model) { continue } @@ -90,3 +89,9 @@ func ReadApm(in *models.ApmResourceInfo) (*Apm, error) { return &apm, nil } + +// IsApmStopped returns true if the resource is stopped. 
+func IsApmStopped(res *models.ApmResourceInfo) bool { + return res == nil || res.Info == nil || res.Info.Status == nil || + *res.Info.Status == "stopped" +} diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_read_test.go b/ec/ecresource/deploymentresource/apm/v2/apm_read_test.go index a4f974c18..0438c506d 100644 --- a/ec/ecresource/deploymentresource/apm/v2/apm_read_test.go +++ b/ec/ecresource/deploymentresource/apm/v2/apm_read_test.go @@ -309,3 +309,35 @@ func Test_readApm(t *testing.T) { }) } } + +func Test_IsApmResourceStopped(t *testing.T) { + type args struct { + res *models.ApmResourceInfo + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "started resource returns false", + args: args{res: &models.ApmResourceInfo{Info: &models.ApmInfo{ + Status: ec.String("started"), + }}}, + want: false, + }, + { + name: "stopped resource returns true", + args: args{res: &models.ApmResourceInfo{Info: &models.ApmInfo{ + Status: ec.String("stopped"), + }}}, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := IsApmStopped(tt.args.res) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload.go index fb8fc1d35..d62aeb4cd 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload.go @@ -31,7 +31,6 @@ import ( integrationsserverv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v2" kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" 
"github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" @@ -84,7 +83,7 @@ func (dep DeploymentTF) CreateRequest(ctx context.Context, client *api.API) (*mo return nil, diagsnostics } - useNodeRoles, err := utils.CompatibleWithNodeRoles(version) + useNodeRoles, err := elasticsearchv2.CompatibleWithNodeRoles(version) if err != nil { diagsnostics.AddError("Deployment parse error", err.Error()) return nil, diagsnostics diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go index 16c8bf342..1e9b411d5 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go @@ -33,7 +33,6 @@ import ( enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/stretchr/testify/assert" @@ -274,7 +273,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ Region: 
ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -473,7 +472,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -625,7 +624,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("es-ref-id"), Settings: &models.ElasticsearchClusterSettings{ @@ -764,7 +763,7 @@ func Test_createRequest(t *testing.T) { // Ref ids are taken from template, not from defaults values in this test. // Defaults are processed by TF during config processing. 
Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("es-ref-id"), Settings: &models.ElasticsearchClusterSettings{ @@ -929,7 +928,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1085,7 +1084,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1216,7 +1215,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, hotWarmTpl(), 
false), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, hotWarmTpl(), false), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("es-ref-id"), Settings: &models.ElasticsearchClusterSettings{ @@ -1337,7 +1336,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("es-ref-id"), Settings: &models.ElasticsearchClusterSettings{ @@ -1488,7 +1487,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1636,7 +1635,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), 
&models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1792,7 +1791,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1951,7 +1950,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -2129,7 +2128,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -2315,7 +2314,7 
@@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -2521,7 +2520,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -2714,7 +2713,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -2877,7 +2876,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: 
[]*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ccsTpl(), false), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ccsTpl(), false), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{}, @@ -2972,7 +2971,7 @@ func Test_createRequest(t *testing.T) { {Key: ec.String("owner"), Value: ec.String("elastic")}, }}, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -3054,7 +3053,7 @@ func Test_createRequest(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentCreateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go index 0a46ca900..2c0040928 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go +++ 
b/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go @@ -19,8 +19,11 @@ package v2 import ( "context" + "errors" "fmt" + "strings" + "github.com/blang/semver" "github.com/elastic/cloud-sdk-go/pkg/models" apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" @@ -31,6 +34,7 @@ import ( observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" "github.com/hashicorp/terraform-plugin-framework/types" ) @@ -118,21 +122,21 @@ func ReadDeployment(res *models.DeploymentGetResponse, remotes *models.RemoteRes return nil, nil } - templateID, err := utils.GetDeploymentTemplateID(res.Resources) + templateID, err := getDeploymentTemplateID(res.Resources) if err != nil { return nil, err } dep.DeploymentTemplateId = templateID - dep.Region = utils.GetRegion(res.Resources) + dep.Region = getRegion(res.Resources) // We're reconciling the version and storing the lowest version of any // of the deployment resources. This ensures that if an upgrade fails, // the state version will be lower than the desired version, making // retries possible. Once more resource types are added, the function // needs to be modified to check those as well. - version, err := utils.GetLowestVersion(res.Resources) + version, err := getLowestVersion(res.Resources) if err != nil { // This code path is highly unlikely, but we're bubbling up the // error in case one of the versions isn't parseable by semver. @@ -239,3 +243,118 @@ func (dep *Deployment) SetCredentialsIfEmpty(state *DeploymentTF) { dep.ApmSecretToken = &state.ApmSecretToken.Value } } + +func getLowestVersion(res *models.DeploymentResources) (string, error) { + // We're starting off with a very high version so it can be replaced. 
+ replaceVersion := `99.99.99` + version := semver.MustParse(replaceVersion) + for _, r := range res.Elasticsearch { + if !util.IsCurrentEsPlanEmpty(r) { + v := r.Info.PlanInfo.Current.Plan.Elasticsearch.Version + if err := swapLowerVersion(&version, v); err != nil && !elasticsearchv2.IsElasticsearchStopped(r) { + return "", fmt.Errorf("elasticsearch version '%s' is not semver compliant: %w", v, err) + } + } + } + + for _, r := range res.Kibana { + if !util.IsCurrentKibanaPlanEmpty(r) { + v := r.Info.PlanInfo.Current.Plan.Kibana.Version + if err := swapLowerVersion(&version, v); err != nil && !kibanav2.IsKibanaStopped(r) { + return version.String(), fmt.Errorf("kibana version '%s' is not semver compliant: %w", v, err) + } + } + } + + for _, r := range res.Apm { + if !util.IsCurrentApmPlanEmpty(r) { + v := r.Info.PlanInfo.Current.Plan.Apm.Version + if err := swapLowerVersion(&version, v); err != nil && !apmv2.IsApmStopped(r) { + return version.String(), fmt.Errorf("apm version '%s' is not semver compliant: %w", v, err) + } + } + } + + for _, r := range res.IntegrationsServer { + if !util.IsCurrentIntegrationsServerPlanEmpty(r) { + v := r.Info.PlanInfo.Current.Plan.IntegrationsServer.Version + if err := swapLowerVersion(&version, v); err != nil && !integrationsserverv2.IsIntegrationsServerStopped(r) { + return version.String(), fmt.Errorf("integrations_server version '%s' is not semver compliant: %w", v, err) + } + } + } + + for _, r := range res.EnterpriseSearch { + if !util.IsCurrentEssPlanEmpty(r) { + v := r.Info.PlanInfo.Current.Plan.EnterpriseSearch.Version + if err := swapLowerVersion(&version, v); err != nil && !enterprisesearchv2.IsEnterpriseSearchStopped(r) { + return version.String(), fmt.Errorf("enterprise search version '%s' is not semver compliant: %w", v, err) + } + } + } + + if version.String() != replaceVersion { + return version.String(), nil + } + return "", errors.New("unable to determine the lowest version for any the deployment components") +} + 
+func swapLowerVersion(version *semver.Version, comp string) error { + if comp == "" { + return nil + } + + v, err := semver.Parse(comp) + if err != nil { + return err + } + if v.LT(*version) { + *version = v + } + return nil +} + +func getRegion(res *models.DeploymentResources) string { + for _, r := range res.Elasticsearch { + if r.Region != nil && *r.Region != "" { + return *r.Region + } + } + + return "" +} + +func getDeploymentTemplateID(res *models.DeploymentResources) (string, error) { + var deploymentTemplateID string + var foundTemplates []string + for _, esRes := range res.Elasticsearch { + if util.IsCurrentEsPlanEmpty(esRes) { + continue + } + + var emptyDT = esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate == nil + if emptyDT { + continue + } + + if deploymentTemplateID == "" { + deploymentTemplateID = *esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID + } + + foundTemplates = append(foundTemplates, + *esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID, + ) + } + + if deploymentTemplateID == "" { + return "", errors.New("failed to obtain the deployment template id") + } + + if len(foundTemplates) > 1 { + return "", fmt.Errorf( + "there are more than 1 deployment templates specified on the deployment: \"%s\"", strings.Join(foundTemplates, ", "), + ) + } + + return deploymentTemplateID, nil +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go index 390a3f629..d70fe9c48 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go @@ -18,6 +18,7 @@ package v2 import ( + "errors" "testing" "github.com/elastic/cloud-sdk-go/pkg/api/mock" @@ -28,7 +29,6 @@ import ( enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" kibanav2 
"github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" "github.com/stretchr/testify/assert" ) @@ -466,7 +466,7 @@ func Test_readDeployment(t *testing.T) { { name: "flattens an azure plan (io-optimized)", args: args{ - res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-azure-io-optimized.json"), + res: deploymentGetResponseFromFile(t, "../../testdata/deployment-azure-io-optimized.json"), }, want: Deployment{ Id: "123e79d8109c4a0790b0b333110bf715", @@ -528,7 +528,7 @@ func Test_readDeployment(t *testing.T) { { name: "flattens an aws plan (io-optimized)", - args: args{res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-aws-io-optimized.json")}, + args: args{res: deploymentGetResponseFromFile(t, "../../testdata/deployment-aws-io-optimized.json")}, want: Deployment{ Id: "123365f2805e46808d40849b1c0b266b", Alias: "my-deployment", @@ -590,7 +590,7 @@ func Test_readDeployment(t *testing.T) { { name: "flattens an aws plan with extensions (io-optimized)", args: args{ - res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-aws-io-optimized-extension.json"), + res: deploymentGetResponseFromFile(t, "../../testdata/deployment-aws-io-optimized-extension.json"), }, want: Deployment{ Id: "123365f2805e46808d40849b1c0b266b", @@ -1043,7 +1043,7 @@ func Test_readDeployment(t *testing.T) { { name: "flattens an aws plan (io-optimized) with tags", - args: args{res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-aws-io-optimized-tags.json")}, + args: args{res: deploymentGetResponseFromFile(t, "../../testdata/deployment-aws-io-optimized-tags.json")}, want: Deployment{ Id: "123365f2805e46808d40849b1c0b266b", Alias: "my-deployment", @@ -1109,7 +1109,7 @@ func Test_readDeployment(t *testing.T) { { name: "flattens a gcp plan 
(io-optimized)", - args: args{res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-gcp-io-optimized.json")}, + args: args{res: deploymentGetResponseFromFile(t, "../../testdata/deployment-gcp-io-optimized.json")}, want: Deployment{ Id: "1239e402d6df471ea374bd68e3f91cc5", Alias: "my-deployment", @@ -1170,7 +1170,7 @@ func Test_readDeployment(t *testing.T) { { name: "flattens a gcp plan with autoscale set (io-optimized)", - args: args{res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-gcp-io-optimized-autoscale.json")}, + args: args{res: deploymentGetResponseFromFile(t, "../../testdata/deployment-gcp-io-optimized-autoscale.json")}, want: Deployment{ Id: "1239e402d6df471ea374bd68e3f91cc5", Alias: "", @@ -1282,7 +1282,7 @@ func Test_readDeployment(t *testing.T) { { name: "flattens a gcp plan (hot-warm)", - args: args{res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-gcp-hot-warm.json")}, + args: args{res: deploymentGetResponseFromFile(t, "../../testdata/deployment-gcp-hot-warm.json")}, want: Deployment{ Id: "123d148423864552aa57b59929d4bf4d", Name: "up2d-hot-warm", @@ -1370,7 +1370,7 @@ func Test_readDeployment(t *testing.T) { { name: "flattens a gcp plan (hot-warm) with node_roles", - args: args{res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-gcp-hot-warm-node_roles.json")}, + args: args{res: deploymentGetResponseFromFile(t, "../../testdata/deployment-gcp-hot-warm-node_roles.json")}, want: Deployment{ Id: "123d148423864552aa57b59929d4bf4d", Name: "up2d-hot-warm", @@ -1482,7 +1482,7 @@ func Test_readDeployment(t *testing.T) { { name: "flattens an aws plan (Cross Cluster Search)", args: args{ - res: testutil.OpenDeploymentGet(t, "../../testdata/deployment-aws-ccs.json"), + res: deploymentGetResponseFromFile(t, "../../testdata/deployment-aws-ccs.json"), remotes: models.RemoteResources{Resources: []*models.RemoteResourceRef{ { Alias: ec.String("alias"), @@ -1566,3 +1566,109 @@ func Test_readDeployment(t *testing.T) { }) } } + 
+func Test_getDeploymentTemplateID(t *testing.T) { + type args struct { + res *models.DeploymentResources + } + tests := []struct { + name string + args args + want string + err error + }{ + { + name: "empty resources returns an error", + args: args{res: &models.DeploymentResources{}}, + err: errors.New("failed to obtain the deployment template id"), + }, + { + name: "single empty current plan returns error", + args: args{res: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + { + Info: &models.ElasticsearchClusterInfo{ + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Pending: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized"), + }, + }, + }, + }, + }, + }, + }, + }}, + err: errors.New("failed to obtain the deployment template id"), + }, + { + name: "multiple deployment templates returns an error", + args: args{res: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + { + Info: &models.ElasticsearchClusterInfo{ + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("someid"), + }, + }, + }, + }, + }, + }, + { + Info: &models.ElasticsearchClusterInfo{ + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("someotherid"), + }, + }, + }, + }, + }, + }, + }, + }}, + err: errors.New("there are more than 1 deployment templates specified on the deployment: \"someid, someotherid\""), + }, + { + name: "single deployment template returns it", + args: args{res: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + { + Info: 
&models.ElasticsearchClusterInfo{ + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized"), + }, + }, + }, + }, + }, + }, + }, + }}, + want: "aws-io-optimized", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := getDeploymentTemplateID(tt.args.res) + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + } + + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_test_utils.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_test_utils.go new file mode 100644 index 000000000..74e0dc03d --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_test_utils.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "encoding/json" + "io" + "os" + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/models" + + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" +) + +func elasticsearchPayloadFromReader(t *testing.T, rc io.Reader, useNodeRoles bool) *models.ElasticsearchPayload { + t.Helper() + + var tpl models.DeploymentTemplateInfoV2 + if err := json.NewDecoder(rc).Decode(&tpl); err != nil { + t.Fatal(err) + } + + return elasticsearchv2.EnrichElasticsearchTemplate( + tpl.DeploymentTemplate.Resources.Elasticsearch[0], + *tpl.ID, + "", + useNodeRoles, + ) +} + +func deploymentGetResponseFromFile(t *testing.T, filename string) *models.DeploymentGetResponse { + t.Helper() + f, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + var res models.DeploymentGetResponse + if err := json.NewDecoder(f).Decode(&res); err != nil { + t.Fatal(err) + } + return &res +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload.go index 5a0501481..9dab746db 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload.go @@ -31,7 +31,6 @@ import ( integrationsserverv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v2" kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/hashicorp/terraform-plugin-framework/diag" ) @@ -71,7 +70,7 @@ func (plan DeploymentTF) UpdateRequest(ctx context.Context, client *api.API, sta // This 
might not be necessary going forward as we move to // tiered Elasticsearch nodes. - useNodeRoles, diags := utils.UseNodeRoles(state.Version, plan.Version) + useNodeRoles, diags := elasticsearchv2.UseNodeRoles(state.Version, plan.Version) if diags.HasError() { return nil, diags diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go index 5846390e2..5056f7651 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go @@ -30,7 +30,6 @@ import ( enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/stretchr/testify/assert" @@ -192,7 +191,7 @@ func Test_updateResourceToModel(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentUpdateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -342,7 +341,7 @@ func Test_updateResourceToModel(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: 
&models.DeploymentUpdateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("es-ref-id"), Settings: &models.ElasticsearchClusterSettings{ @@ -500,7 +499,7 @@ func Test_updateResourceToModel(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentUpdateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -632,7 +631,7 @@ func Test_updateResourceToModel(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentUpdateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, hotWarmTpl(), false), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, hotWarmTpl(), false), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("es-ref-id"), Settings: &models.ElasticsearchClusterSettings{ @@ -834,7 +833,7 @@ func Test_updateResourceToModel(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentUpdateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ccsTpl(), false), 
&models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ccsTpl(), false), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{}, @@ -960,7 +959,7 @@ func Test_updateResourceToModel(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentUpdateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ccsTpl(), false), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ccsTpl(), false), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{}, @@ -1093,7 +1092,7 @@ func Test_updateResourceToModel(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentUpdateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1253,7 +1252,7 @@ func Test_updateResourceToModel(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentUpdateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, 
ioOptimizedTpl(), false), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1356,7 +1355,7 @@ func Test_updateResourceToModel(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentUpdateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1468,7 +1467,7 @@ func Test_updateResourceToModel(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentUpdateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1602,7 +1601,7 @@ func Test_updateResourceToModel(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentUpdateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: 
&models.ElasticsearchClusterSettings{ @@ -1734,7 +1733,7 @@ func Test_updateResourceToModel(t *testing.T) { {Key: ec.String("owner"), Value: ec.String("elastic")}, }}, Resources: &models.DeploymentUpdateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1834,7 +1833,7 @@ func Test_updateResourceToModel(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentUpdateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1921,7 +1920,7 @@ func Test_updateResourceToModel(t *testing.T) { Tags: []*models.MetadataItem{}, }, Resources: &models.DeploymentUpdateResources{ - Elasticsearch: []*models.ElasticsearchPayload{testutil.EnrichWithEmptyTopologies(testutil.ReaderToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ Region: ec.String("us-east-1"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ diff --git 
a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go index 90bc5f266..5d0931cc9 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go @@ -19,14 +19,16 @@ package v2 import ( "context" + "fmt" "strings" + "github.com/blang/semver" "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" - - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" ) type ElasticsearchTF struct { @@ -68,7 +70,7 @@ func ElasticsearchPayload(ctx context.Context, esObj types.Object, template *mod return nil, nil } - templatePayload := utils.EnrichElasticsearchTemplate(utils.EsResource(template), dtID, version, useNodeRoles) + templatePayload := EnrichElasticsearchTemplate(payloadFromTemplate(template), dtID, version, useNodeRoles) payload, diags := es.payload(ctx, templatePayload, skipTopologies) if diags.HasError() { @@ -263,3 +265,98 @@ func elasticsearchStrategyPayload(strategy types.String, payload *models.Elastic } } } + +func payloadFromTemplate(template *models.DeploymentTemplateInfoV2) *models.ElasticsearchPayload { + if template == nil || len(template.DeploymentTemplate.Resources.Elasticsearch) == 0 { + return &models.ElasticsearchPayload{ + Plan: &models.ElasticsearchClusterPlan{ + Elasticsearch: &models.ElasticsearchConfiguration{}, + }, + Settings: &models.ElasticsearchClusterSettings{}, + } + } + return template.DeploymentTemplate.Resources.Elasticsearch[0] +} + +func EnrichElasticsearchTemplate(tpl *models.ElasticsearchPayload, templateId, version string, 
useNodeRoles bool) *models.ElasticsearchPayload { + if tpl.Plan.DeploymentTemplate == nil { + tpl.Plan.DeploymentTemplate = &models.DeploymentTemplateReference{} + } + + if tpl.Plan.DeploymentTemplate.ID == nil || *tpl.Plan.DeploymentTemplate.ID == "" { + tpl.Plan.DeploymentTemplate.ID = ec.String(templateId) + } + + if tpl.Plan.Elasticsearch.Version == "" { + tpl.Plan.Elasticsearch.Version = version + } + + for _, topology := range tpl.Plan.ClusterTopology { + if useNodeRoles { + topology.NodeType = nil + continue + } + topology.NodeRoles = nil + } + + return tpl +} + +func CompatibleWithNodeRoles(version string) (bool, error) { + deploymentVersion, err := semver.Parse(version) + if err != nil { + return false, fmt.Errorf("failed to parse Elasticsearch version: %w", err) + } + + return deploymentVersion.GE(utils.DataTiersVersion), nil +} + +func UseNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostics) { + + useNodeRoles, err := CompatibleWithNodeRoles(planVersion.Value) + + if err != nil { + var diags diag.Diagnostics + diags.AddError("Failed to determine whether to use node_roles", err.Error()) + return false, diags + } + + convertLegacy, diags := legacyToNodeRoles(stateVersion, planVersion) + + if diags.HasError() { + return false, diags + } + + return useNodeRoles && convertLegacy, nil +} + +// legacyToNodeRoles returns true when the legacy "node_type_*" should be +// migrated over to node_roles. Which will be true when: +// * The version field doesn't change. +// * The version field changes but: +// - The Elasticsearch.0.toplogy doesn't have any node_type_* set. 
+func legacyToNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostics) { + if stateVersion.Value == "" || stateVersion.Value == planVersion.Value { + return true, nil + } + + var diags diag.Diagnostics + oldVersion, err := semver.Parse(stateVersion.Value) + if err != nil { + diags.AddError("failed to parse previous Elasticsearch version", err.Error()) + return false, diags + } + newVersion, err := semver.Parse(planVersion.Value) + if err != nil { + diags.AddError("failed to parse new Elasticsearch version", err.Error()) + return false, diags + } + + // if the version change moves from non-node_roles to one + // that supports node roles, do not migrate on that step. + if oldVersion.LT(utils.DataTiersVersion) && newVersion.GE(utils.DataTiersVersion) { + return false, nil + } + + return true, nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go index 4895719b2..da5d7ba5d 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go @@ -31,14 +31,13 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" ) func Test_writeElasticsearch(t *testing.T) { tplPath := "../../testdata/template-aws-io-optimized-v2.json" tp770 := func() *models.ElasticsearchPayload { - return utils.EnrichElasticsearchTemplate( - utils.EsResource(testutil.ParseDeploymentTemplate(t, tplPath)), + return EnrichElasticsearchTemplate( + payloadFromTemplate(testutil.ParseDeploymentTemplate(t, tplPath)), "aws-io-optimized-v2", "7.7.0", false, @@ -46,8 +45,8 @@ func Test_writeElasticsearch(t *testing.T) { } create710 := func() 
*models.ElasticsearchPayload { - return utils.EnrichElasticsearchTemplate( - utils.EsResource(testutil.ParseDeploymentTemplate(t, tplPath)), + return EnrichElasticsearchTemplate( + payloadFromTemplate(testutil.ParseDeploymentTemplate(t, tplPath)), "aws-io-optimized-v2", "7.10.0", true, @@ -55,8 +54,8 @@ func Test_writeElasticsearch(t *testing.T) { } update711 := func() *models.ElasticsearchPayload { - return utils.EnrichElasticsearchTemplate( - utils.EsResource(testutil.ParseDeploymentTemplate(t, tplPath)), + return EnrichElasticsearchTemplate( + payloadFromTemplate(testutil.ParseDeploymentTemplate(t, tplPath)), "aws-io-optimized-v2", "7.11.0", true, @@ -65,8 +64,8 @@ func Test_writeElasticsearch(t *testing.T) { hotWarmTplPath := "../../testdata/template-aws-hot-warm-v2.json" hotWarmTpl770 := func() *models.ElasticsearchPayload { - return utils.EnrichElasticsearchTemplate( - utils.EsResource(testutil.ParseDeploymentTemplate(t, hotWarmTplPath)), + return EnrichElasticsearchTemplate( + payloadFromTemplate(testutil.ParseDeploymentTemplate(t, hotWarmTplPath)), "aws-io-optimized-v2", "7.7.0", false, @@ -74,8 +73,8 @@ func Test_writeElasticsearch(t *testing.T) { } hotWarm7111Tpl := func() *models.ElasticsearchPayload { - return utils.EnrichElasticsearchTemplate( - utils.EsResource(testutil.ParseDeploymentTemplate(t, hotWarmTplPath)), + return EnrichElasticsearchTemplate( + payloadFromTemplate(testutil.ParseDeploymentTemplate(t, hotWarmTplPath)), "aws-io-optimized-v2", "7.11.1", true, @@ -84,8 +83,8 @@ func Test_writeElasticsearch(t *testing.T) { eceDefaultTplPath := "../../testdata/template-ece-3.0.0-default.json" eceDefaultTpl := func() *models.ElasticsearchPayload { - return utils.EnrichElasticsearchTemplate( - utils.EsResource(testutil.ParseDeploymentTemplate(t, eceDefaultTplPath)), + return EnrichElasticsearchTemplate( + payloadFromTemplate(testutil.ParseDeploymentTemplate(t, eceDefaultTplPath)), "aws-io-optimized-v2", "7.17.3", true, @@ -123,7 +122,7 @@ func 
Test_writeElasticsearch(t *testing.T) { version: "7.7.0", useNodeRoles: false, }, - want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -189,7 +188,7 @@ func Test_writeElasticsearch(t *testing.T) { version: "7.10.0", useNodeRoles: true, }, - want: testutil.EnrichWithEmptyTopologies(create710(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(create710(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -259,7 +258,7 @@ func Test_writeElasticsearch(t *testing.T) { version: "7.11.0", useNodeRoles: true, }, - want: testutil.EnrichWithEmptyTopologies(update711(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(update711(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -318,7 +317,7 @@ func Test_writeElasticsearch(t *testing.T) { version: "7.7.0", useNodeRoles: false, }, - want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -389,7 +388,7 @@ func Test_writeElasticsearch(t *testing.T) { version: "7.7.0", useNodeRoles: false, }, - want: testutil.EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -494,7 +493,7 @@ func Test_writeElasticsearch(t *testing.T) { 
version: "7.7.0", useNodeRoles: false, }, - want: testutil.EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -587,7 +586,7 @@ func Test_writeElasticsearch(t *testing.T) { version: "7.7.0", useNodeRoles: false, }, - want: testutil.EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -690,7 +689,7 @@ func Test_writeElasticsearch(t *testing.T) { version: "7.7.0", useNodeRoles: false, }, - want: testutil.EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -798,7 +797,7 @@ func Test_writeElasticsearch(t *testing.T) { version: "7.11.1", useNodeRoles: true, }, - want: testutil.EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -931,7 +930,7 @@ func Test_writeElasticsearch(t *testing.T) { version: "7.11.1", useNodeRoles: true, }, - want: testutil.EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1081,7 +1080,7 @@ func Test_writeElasticsearch(t 
*testing.T) { version: "7.11.1", useNodeRoles: true, }, - want: testutil.EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1245,7 +1244,7 @@ func Test_writeElasticsearch(t *testing.T) { version: "7.17.3", useNodeRoles: true, }, - want: testutil.EnrichWithEmptyTopologies(eceDefaultTpl(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(eceDefaultTpl(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1367,7 +1366,7 @@ func Test_writeElasticsearch(t *testing.T) { version: "7.11.1", useNodeRoles: true, }, - want: testutil.EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1505,7 +1504,7 @@ func Test_writeElasticsearch(t *testing.T) { version: "7.7.0", useNodeRoles: false, }, - want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1582,7 +1581,7 @@ func Test_writeElasticsearch(t *testing.T) { version: "7.7.0", useNodeRoles: false, }, - want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1653,7 +1652,7 @@ func Test_writeElasticsearch(t *testing.T) { 
version: "7.7.0", useNodeRoles: false, }, - want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1725,7 +1724,7 @@ func Test_writeElasticsearch(t *testing.T) { version: "7.7.0", useNodeRoles: false, }, - want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1797,7 +1796,7 @@ func Test_writeElasticsearch(t *testing.T) { version: "7.7.0", useNodeRoles: false, }, - want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1870,7 +1869,7 @@ func Test_writeElasticsearch(t *testing.T) { useNodeRoles: false, }, - want: testutil.EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go index 6924e7c51..7ae4f8b14 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go @@ -20,7 +20,6 @@ package v2 import ( "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" 
"github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) @@ -51,7 +50,7 @@ type Elasticsearch struct { func ReadElasticsearches(in []*models.ElasticsearchResourceInfo, remotes *models.RemoteResources) (*Elasticsearch, error) { for _, model := range in { - if util.IsCurrentEsPlanEmpty(model) || utils.IsEsResourceStopped(model) { + if util.IsCurrentEsPlanEmpty(model) || IsElasticsearchStopped(model) { continue } es, err := readElasticsearch(model, remotes) @@ -67,7 +66,7 @@ func ReadElasticsearches(in []*models.ElasticsearchResourceInfo, remotes *models func readElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.RemoteResources) (*Elasticsearch, error) { var es Elasticsearch - if util.IsCurrentEsPlanEmpty(in) || utils.IsEsResourceStopped(in) { + if util.IsCurrentEsPlanEmpty(in) || IsElasticsearchStopped(in) { return &es, nil } @@ -157,3 +156,9 @@ func (es *Elasticsearch) setTopology(topologies ElasticsearchTopologies) { } } } + +// IsElasticsearchStopped returns true if the resource is stopped. 
+func IsElasticsearchStopped(res *models.ElasticsearchResourceInfo) bool { + return res == nil || res.Info == nil || res.Info.Status == nil || + *res.Info.Status == "stopped" +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go index 5f6b97324..5bc97e074 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go @@ -431,3 +431,35 @@ func Test_readElasticsearchConfig(t *testing.T) { }) } } + +func Test_IsEsResourceStopped(t *testing.T) { + type args struct { + res *models.ElasticsearchResourceInfo + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "started resource returns false", + args: args{res: &models.ElasticsearchResourceInfo{Info: &models.ElasticsearchClusterInfo{ + Status: ec.String("started"), + }}}, + want: false, + }, + { + name: "stopped resource returns true", + args: args{res: &models.ElasticsearchResourceInfo{Info: &models.ElasticsearchClusterInfo{ + Status: ec.String("stopped"), + }}}, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := IsElasticsearchStopped(tt.args.res) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_test_utils.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_test_utils.go new file mode 100644 index 000000000..c007c4823 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_test_utils.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import "github.com/elastic/cloud-sdk-go/pkg/models" + +func CreateTierForTest(tierId string, tier ElasticsearchTopology) *ElasticsearchTopology { + res := tier + res.id = tierId + return &res +} + +func EnrichWithEmptyTopologies(tpl, want *models.ElasticsearchPayload) *models.ElasticsearchPayload { + tpl.DisplayName = want.DisplayName + tpl.RefID = want.RefID + tpl.Region = want.Region + tpl.Settings = want.Settings + tpl.Plan.AutoscalingEnabled = want.Plan.AutoscalingEnabled + tpl.Plan.Elasticsearch = want.Plan.Elasticsearch + tpl.Plan.Transient = want.Plan.Transient + + for i, t := range tpl.Plan.ClusterTopology { + for _, w := range want.Plan.ClusterTopology { + if t.ID == w.ID { + tpl.Plan.ClusterTopology[i] = w + } + } + } + + return tpl +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go index 8db932188..ce795c78c 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go @@ -64,12 +64,6 @@ type ElasticsearchTopology struct { Autoscaling *ElasticsearchTopologyAutoscaling `tfsdk:"autoscaling"` } -func CreateTierForTest(tierId string, tier ElasticsearchTopology) *ElasticsearchTopology { - res := tier - res.id = tierId - return &res -} - type 
ElasticsearchTopologyAutoscaling v1.ElasticsearchTopologyAutoscaling func (topology ElasticsearchTopologyTF) payload(ctx context.Context, topologyID string, planTopologies []*models.ElasticsearchClusterTopologyElement) diag.Diagnostics { diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go index 37940a66b..96b843e6a 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go @@ -20,7 +20,6 @@ package v2 import ( "context" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" @@ -80,7 +79,7 @@ func (r nodeRolesDefault) Modify(ctx context.Context, req tfsdk.ModifyAttributeP return } - useNodeRoles, diags := utils.UseNodeRoles(stateVersion, planVersion) + useNodeRoles, diags := UseNodeRoles(stateVersion, planVersion) if diags.HasError() { resp.Diagnostics.Append(diags...) 
diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go index a511428f8..d55529747 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go @@ -20,7 +20,6 @@ package v2 import ( "context" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" @@ -83,7 +82,7 @@ func (r nodeTypesDefault) Modify(ctx context.Context, req tfsdk.ModifyAttributeP return } - useNodeRoles, diags := utils.UseNodeRoles(stateVersion, planVersion) + useNodeRoles, diags := UseNodeRoles(stateVersion, planVersion) if diags.HasError() { resp.Diagnostics.Append(diags...) diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read.go index 9262a429b..913f72bef 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read.go @@ -19,7 +19,6 @@ package v2 import ( "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) @@ -44,7 +43,7 @@ type EnterpriseSearch struct { type EnterpriseSearches []EnterpriseSearch func ReadEnterpriseSearch(in *models.EnterpriseSearchResourceInfo) (*EnterpriseSearch, error) { - if util.IsCurrentEssPlanEmpty(in) || utils.IsEssResourceStopped(in) { + if util.IsCurrentEssPlanEmpty(in) || IsEnterpriseSearchStopped(in) { return nil, nil } @@ -89,7 
+88,7 @@ func ReadEnterpriseSearch(in *models.EnterpriseSearchResourceInfo) (*EnterpriseS func ReadEnterpriseSearches(in []*models.EnterpriseSearchResourceInfo) (*EnterpriseSearch, error) { for _, model := range in { - if util.IsCurrentEssPlanEmpty(model) || utils.IsEssResourceStopped(model) { + if util.IsCurrentEssPlanEmpty(model) || IsEnterpriseSearchStopped(model) { continue } @@ -103,3 +102,9 @@ func ReadEnterpriseSearches(in []*models.EnterpriseSearchResourceInfo) (*Enterpr return nil, nil } + +// IsEnterpriseSearchStopped returns true if the resource is stopped. +func IsEnterpriseSearchStopped(res *models.EnterpriseSearchResourceInfo) bool { + return res == nil || res.Info == nil || res.Info.Status == nil || + *res.Info.Status == "stopped" +} diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read_test.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read_test.go index fb10b3c84..564d0a93c 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read_test.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read_test.go @@ -184,3 +184,35 @@ func Test_readEnterpriseSearch(t *testing.T) { }) } } + +func Test_IsEnterpriseSearchStopped(t *testing.T) { + type args struct { + res *models.EnterpriseSearchResourceInfo + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "started resource returns false", + args: args{res: &models.EnterpriseSearchResourceInfo{Info: &models.EnterpriseSearchInfo{ + Status: ec.String("started"), + }}}, + want: false, + }, + { + name: "stopped resource returns true", + args: args{res: &models.EnterpriseSearchResourceInfo{Info: &models.EnterpriseSearchInfo{ + Status: ec.String("stopped"), + }}}, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := IsEnterpriseSearchStopped(tt.args.res) + assert.Equal(t, tt.want, got) + }) + } +} diff --git 
a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read.go index d284000bb..ac9c698d9 100644 --- a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read.go +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read.go @@ -19,7 +19,6 @@ package v2 import ( "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) @@ -40,7 +39,7 @@ type IntegrationsServer struct { func ReadIntegrationsServers(in []*models.IntegrationsServerResourceInfo) (*IntegrationsServer, error) { for _, model := range in { - if util.IsCurrentIntegrationsServerPlanEmpty(model) || utils.IsIntegrationsServerResourceStopped(model) { + if util.IsCurrentIntegrationsServerPlanEmpty(model) || IsIntegrationsServerStopped(model) { continue } @@ -94,3 +93,9 @@ func readIntegrationsServer(in *models.IntegrationsServerResourceInfo) (*Integra return &srv, nil } + +// IsIntegrationsServerStopped returns true if the resource is stopped. 
+func IsIntegrationsServerStopped(res *models.IntegrationsServerResourceInfo) bool { + return res == nil || res.Info == nil || res.Info.Status == nil || + *res.Info.Status == "stopped" +} diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read_test.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read_test.go index d9561e7b6..df718d069 100644 --- a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read_test.go +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read_test.go @@ -343,3 +343,35 @@ func Test_readIntegrationsServer(t *testing.T) { }) } } + +func Test_IsIntegrationsServerStopped(t *testing.T) { + type args struct { + res *models.IntegrationsServerResourceInfo + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "started resource returns false", + args: args{res: &models.IntegrationsServerResourceInfo{Info: &models.IntegrationsServerInfo{ + Status: ec.String("started"), + }}}, + want: false, + }, + { + name: "stopped resource returns true", + args: args{res: &models.IntegrationsServerResourceInfo{Info: &models.IntegrationsServerInfo{ + Status: ec.String("stopped"), + }}}, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := IsIntegrationsServerStopped(tt.args.res) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana_read.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_read.go index 0fc03902f..3c5b47dd8 100644 --- a/ec/ecresource/deploymentresource/kibana/v2/kibana_read.go +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_read.go @@ -19,7 +19,6 @@ package v2 import ( "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" "github.com/elastic/terraform-provider-ec/ec/internal/converters" 
"github.com/elastic/terraform-provider-ec/ec/internal/util" ) @@ -40,7 +39,7 @@ type Kibana struct { func ReadKibanas(in []*models.KibanaResourceInfo) (*Kibana, error) { for _, model := range in { - if util.IsCurrentKibanaPlanEmpty(model) || utils.IsKibanaResourceStopped(model) { + if util.IsCurrentKibanaPlanEmpty(model) || IsKibanaStopped(model) { continue } @@ -92,3 +91,9 @@ func readKibana(in *models.KibanaResourceInfo) (*Kibana, error) { return &kibana, nil } + +// IsKibanaStopped returns true if the resource is stopped. +func IsKibanaStopped(res *models.KibanaResourceInfo) bool { + return res == nil || res.Info == nil || res.Info.Status == nil || + *res.Info.Status == "stopped" +} diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana_read_test.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_read_test.go index e1144ef48..3155dccf3 100644 --- a/ec/ecresource/deploymentresource/kibana/v2/kibana_read_test.go +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_read_test.go @@ -173,3 +173,35 @@ func Test_ReadKibana(t *testing.T) { }) } } + +func Test_IsKibanaResourceStopped(t *testing.T) { + type args struct { + res *models.KibanaResourceInfo + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "started resource returns false", + args: args{res: &models.KibanaResourceInfo{Info: &models.KibanaClusterInfo{ + Status: ec.String("started"), + }}}, + want: false, + }, + { + name: "stopped resource returns true", + args: args{res: &models.KibanaResourceInfo{Info: &models.KibanaClusterInfo{ + Status: ec.String("stopped"), + }}}, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := IsKibanaStopped(tt.args.res) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/ec/ecresource/deploymentresource/read.go b/ec/ecresource/deploymentresource/read.go index 9714bfb14..326286946 100644 --- a/ec/ecresource/deploymentresource/read.go +++ b/ec/ecresource/deploymentresource/read.go @@ -29,9 
+29,12 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/esremoteclustersapi" "github.com/elastic/cloud-sdk-go/pkg/client/deployments" "github.com/elastic/cloud-sdk-go/pkg/models" + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" deploymentv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + integrationsserverv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/tfsdk" @@ -91,7 +94,7 @@ func (r *Resource) read(ctx context.Context, id string, state *deploymentv2.Depl return nil, diags } - if !utils.HasRunningResources(response) { + if !HasRunningResources(response) { return nil, nil } @@ -201,3 +204,34 @@ func checkVersion(version string) error { return nil } + +func HasRunningResources(res *models.DeploymentGetResponse) bool { + if res.Resources != nil { + for _, r := range res.Resources.Elasticsearch { + if !elasticsearchv2.IsElasticsearchStopped(r) { + return true + } + } + for _, r := range res.Resources.Kibana { + if !kibanav2.IsKibanaStopped(r) { + return true + } + } + for _, r := range res.Resources.Apm { + if !apmv2.IsApmStopped(r) { + return true + } + } + for _, r := range res.Resources.EnterpriseSearch { + if !enterprisesearchv2.IsEnterpriseSearchStopped(r) { + return true + } + } + for _, r := range res.Resources.IntegrationsServer { + if 
!integrationsserverv2.IsIntegrationsServerStopped(r) { + return true + } + } + } + return false +} diff --git a/ec/ecresource/deploymentresource/utils/getters_test.go b/ec/ecresource/deploymentresource/read_test.go similarity index 51% rename from ec/ecresource/deploymentresource/utils/getters_test.go rename to ec/ecresource/deploymentresource/read_test.go index 9f9afb039..29e0de51c 100644 --- a/ec/ecresource/deploymentresource/utils/getters_test.go +++ b/ec/ecresource/deploymentresource/read_test.go @@ -15,123 +15,16 @@ // specific language governing permissions and limitations // under the License. -package utils +package deploymentresource_test import ( - "errors" "testing" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource" ) -func Test_getDeploymentTemplateID(t *testing.T) { - type args struct { - res *models.DeploymentResources - } - tests := []struct { - name string - args args - want string - err error - }{ - { - name: "empty resources returns an error", - args: args{res: &models.DeploymentResources{}}, - err: errors.New("failed to obtain the deployment template id"), - }, - { - name: "single empty current plan returns error", - args: args{res: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Pending: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized"), - }, - }, - }, - }, - }, - }, - }, - }}, - err: errors.New("failed to obtain the deployment template id"), - }, - { - name: "multiple deployment templates returns an error", - args: args{res: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Info: 
&models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("someid"), - }, - }, - }, - }, - }, - }, - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("someotherid"), - }, - }, - }, - }, - }, - }, - }, - }}, - err: errors.New("there are more than 1 deployment templates specified on the deployment: \"someid, someotherid\""), - }, - { - name: "single deployment template returns it", - args: args{res: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized"), - }, - }, - }, - }, - }, - }, - }, - }}, - want: "aws-io-optimized", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := GetDeploymentTemplateID(tt.args.res) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - - assert.Equal(t, tt.want, got) - }) - } -} - func Test_hasRunningResources(t *testing.T) { type args struct { res *models.DeploymentGetResponse @@ -198,7 +91,7 @@ func Test_hasRunningResources(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := HasRunningResources(tt.args.res); got != tt.want { + if got := deploymentresource.HasRunningResources(tt.args.res); got != tt.want { t.Errorf("hasRunningResources() = %v, want %v", got, tt.want) } }) diff --git 
a/ec/ecresource/deploymentresource/testutil/testutil_func.go b/ec/ecresource/deploymentresource/testutil/testutil_func.go index 879d1002a..3a1fbe739 100644 --- a/ec/ecresource/deploymentresource/testutil/testutil_func.go +++ b/ec/ecresource/deploymentresource/testutil/testutil_func.go @@ -19,12 +19,10 @@ package testutil import ( "encoding/json" - "io" "os" "testing" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" ) // parseDeploymentTemplate is a test helper which parse a file by path and @@ -51,54 +49,3 @@ func ParseDeploymentTemplate(t *testing.T, name string) *models.DeploymentTempla return &res } - -func OpenDeploymentGet(t *testing.T, name string) *models.DeploymentGetResponse { - t.Helper() - f, err := os.Open(name) - if err != nil { - t.Fatal(err) - } - defer f.Close() - - var res models.DeploymentGetResponse - if err := json.NewDecoder(f).Decode(&res); err != nil { - t.Fatal(err) - } - return &res -} - -func EnrichWithEmptyTopologies(tpl, want *models.ElasticsearchPayload) *models.ElasticsearchPayload { - tpl.DisplayName = want.DisplayName - tpl.RefID = want.RefID - tpl.Region = want.Region - tpl.Settings = want.Settings - tpl.Plan.AutoscalingEnabled = want.Plan.AutoscalingEnabled - tpl.Plan.Elasticsearch = want.Plan.Elasticsearch - tpl.Plan.Transient = want.Plan.Transient - - for i, t := range tpl.Plan.ClusterTopology { - for _, w := range want.Plan.ClusterTopology { - if t.ID == w.ID { - tpl.Plan.ClusterTopology[i] = w - } - } - } - - return tpl -} - -func ReaderToESPayload(t *testing.T, rc io.Reader, nr bool) *models.ElasticsearchPayload { - t.Helper() - - var tpl models.DeploymentTemplateInfoV2 - if err := json.NewDecoder(rc).Decode(&tpl); err != nil { - t.Fatal(err) - } - - return utils.EnrichElasticsearchTemplate( - tpl.DeploymentTemplate.Resources.Elasticsearch[0], - *tpl.ID, - "", - nr, - ) -} diff --git a/ec/ecresource/deploymentresource/utils/definitions.go 
b/ec/ecresource/deploymentresource/utils/definitions.go index 5fea5e34c..29505cf2b 100644 --- a/ec/ecresource/deploymentresource/utils/definitions.go +++ b/ec/ecresource/deploymentresource/utils/definitions.go @@ -17,6 +17,12 @@ package utils +import "github.com/blang/semver" + const ( MinimumZoneCount = 1 ) + +var ( + DataTiersVersion = semver.MustParse("7.10.0") +) diff --git a/ec/ecresource/deploymentresource/utils/enrich_elasticsearch_template.go b/ec/ecresource/deploymentresource/utils/enrich_elasticsearch_template.go deleted file mode 100644 index 571282325..000000000 --- a/ec/ecresource/deploymentresource/utils/enrich_elasticsearch_template.go +++ /dev/null @@ -1,59 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package utils - -import ( - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" -) - -func EnrichElasticsearchTemplate(tpl *models.ElasticsearchPayload, templateId, version string, useNodeRoles bool) *models.ElasticsearchPayload { - if tpl.Plan.DeploymentTemplate == nil { - tpl.Plan.DeploymentTemplate = &models.DeploymentTemplateReference{} - } - - if tpl.Plan.DeploymentTemplate.ID == nil || *tpl.Plan.DeploymentTemplate.ID == "" { - tpl.Plan.DeploymentTemplate.ID = ec.String(templateId) - } - - if tpl.Plan.Elasticsearch.Version == "" { - tpl.Plan.Elasticsearch.Version = version - } - - for _, topology := range tpl.Plan.ClusterTopology { - if useNodeRoles { - topology.NodeType = nil - continue - } - topology.NodeRoles = nil - } - - return tpl -} - -func EsResource(res *models.DeploymentTemplateInfoV2) *models.ElasticsearchPayload { - if res == nil || len(res.DeploymentTemplate.Resources.Elasticsearch) == 0 { - return &models.ElasticsearchPayload{ - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{}, - }, - Settings: &models.ElasticsearchClusterSettings{}, - } - } - return res.DeploymentTemplate.Resources.Elasticsearch[0] -} diff --git a/ec/ecresource/deploymentresource/utils/getters.go b/ec/ecresource/deploymentresource/utils/getters.go deleted file mode 100644 index 0c63ca93b..000000000 --- a/ec/ecresource/deploymentresource/utils/getters.go +++ /dev/null @@ -1,174 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package utils - -import ( - "errors" - "fmt" - "strings" - - "github.com/blang/semver" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func HasRunningResources(res *models.DeploymentGetResponse) bool { - if res.Resources != nil { - for _, r := range res.Resources.Elasticsearch { - if !IsEsResourceStopped(r) { - return true - } - } - for _, r := range res.Resources.Kibana { - if !IsKibanaResourceStopped(r) { - return true - } - } - for _, r := range res.Resources.Apm { - if !IsApmResourceStopped(r) { - return true - } - } - for _, r := range res.Resources.EnterpriseSearch { - if !IsEssResourceStopped(r) { - return true - } - } - for _, r := range res.Resources.IntegrationsServer { - if !IsIntegrationsServerResourceStopped(r) { - return true - } - } - } - return false -} - -func GetDeploymentTemplateID(res *models.DeploymentResources) (string, error) { - var deploymentTemplateID string - var foundTemplates []string - for _, esRes := range res.Elasticsearch { - if util.IsCurrentEsPlanEmpty(esRes) { - continue - } - - var emptyDT = esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate == nil - if emptyDT { - continue - } - - if deploymentTemplateID == "" { - deploymentTemplateID = *esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID - } - - foundTemplates = append(foundTemplates, - *esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID, - ) - } - - if deploymentTemplateID == "" { - return "", errors.New("failed to obtain the deployment template id") - } - - if 
len(foundTemplates) > 1 { - return "", fmt.Errorf( - "there are more than 1 deployment templates specified on the deployment: \"%s\"", strings.Join(foundTemplates, ", "), - ) - } - - return deploymentTemplateID, nil -} - -func GetRegion(res *models.DeploymentResources) string { - for _, r := range res.Elasticsearch { - if r.Region != nil && *r.Region != "" { - return *r.Region - } - } - - return "" -} - -func GetLowestVersion(res *models.DeploymentResources) (string, error) { - // We're starting off with a very high version so it can be replaced. - replaceVersion := `99.99.99` - version := semver.MustParse(replaceVersion) - for _, r := range res.Elasticsearch { - if !util.IsCurrentEsPlanEmpty(r) { - v := r.Info.PlanInfo.Current.Plan.Elasticsearch.Version - if err := swapLowerVersion(&version, v); err != nil && !IsEsResourceStopped(r) { - return "", fmt.Errorf("elasticsearch version '%s' is not semver compliant: %w", v, err) - } - } - } - - for _, r := range res.Kibana { - if !util.IsCurrentKibanaPlanEmpty(r) { - v := r.Info.PlanInfo.Current.Plan.Kibana.Version - if err := swapLowerVersion(&version, v); err != nil && !IsKibanaResourceStopped(r) { - return version.String(), fmt.Errorf("kibana version '%s' is not semver compliant: %w", v, err) - } - } - } - - for _, r := range res.Apm { - if !util.IsCurrentApmPlanEmpty(r) { - v := r.Info.PlanInfo.Current.Plan.Apm.Version - if err := swapLowerVersion(&version, v); err != nil && !IsApmResourceStopped(r) { - return version.String(), fmt.Errorf("apm version '%s' is not semver compliant: %w", v, err) - } - } - } - - for _, r := range res.IntegrationsServer { - if !util.IsCurrentIntegrationsServerPlanEmpty(r) { - v := r.Info.PlanInfo.Current.Plan.IntegrationsServer.Version - if err := swapLowerVersion(&version, v); err != nil && !IsIntegrationsServerResourceStopped(r) { - return version.String(), fmt.Errorf("integrations_server version '%s' is not semver compliant: %w", v, err) - } - } - } - - for _, r := range 
res.EnterpriseSearch { - if !util.IsCurrentEssPlanEmpty(r) { - v := r.Info.PlanInfo.Current.Plan.EnterpriseSearch.Version - if err := swapLowerVersion(&version, v); err != nil && !IsEssResourceStopped(r) { - return version.String(), fmt.Errorf("enterprise search version '%s' is not semver compliant: %w", v, err) - } - } - } - - if version.String() != replaceVersion { - return version.String(), nil - } - return "", errors.New("unable to determine the lowest version for any the deployment components") -} - -func swapLowerVersion(version *semver.Version, comp string) error { - if comp == "" { - return nil - } - - v, err := semver.Parse(comp) - if err != nil { - return err - } - if v.LT(*version) { - *version = v - } - return nil -} diff --git a/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go b/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go deleted file mode 100644 index b2bb7d2d7..000000000 --- a/ec/ecresource/deploymentresource/utils/node_types_to_node_roles.go +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package utils - -import ( - "fmt" - - "github.com/blang/semver" - "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/types" -) - -var ( - DataTiersVersion = semver.MustParse("7.10.0") -) - -func UseNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostics) { - - useNodeRoles, err := CompatibleWithNodeRoles(planVersion.Value) - - if err != nil { - var diags diag.Diagnostics - diags.AddError("Failed to determine whether to use node_roles", err.Error()) - return false, diags - } - - convertLegacy, diags := legacyToNodeRoles(stateVersion, planVersion) - - if diags.HasError() { - return false, diags - } - - return useNodeRoles && convertLegacy, nil -} - -func CompatibleWithNodeRoles(version string) (bool, error) { - deploymentVersion, err := semver.Parse(version) - if err != nil { - return false, fmt.Errorf("failed to parse Elasticsearch version: %w", err) - } - - return deploymentVersion.GE(DataTiersVersion), nil -} - -// legacyToNodeRoles returns true when the legacy "node_type_*" should be -// migrated over to node_roles. Which will be true when: -// * The version field doesn't change. -// * The version field changes but: -// - The Elasticsearch.0.toplogy doesn't have any node_type_* set. -func legacyToNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostics) { - if stateVersion.Value == "" || stateVersion.Value == planVersion.Value { - return true, nil - } - - var diags diag.Diagnostics - oldVersion, err := semver.Parse(stateVersion.Value) - if err != nil { - diags.AddError("failed to parse previous Elasticsearch version", err.Error()) - return false, diags - } - newVersion, err := semver.Parse(planVersion.Value) - if err != nil { - diags.AddError("failed to parse new Elasticsearch version", err.Error()) - return false, diags - } - - // if the version change moves from non-node_roles to one - // that supports node roles, do not migrate on that step. 
- if oldVersion.LT(DataTiersVersion) && newVersion.GE(DataTiersVersion) { - return false, nil - } - - return true, nil -} diff --git a/ec/ecresource/deploymentresource/utils/stopped_resource.go b/ec/ecresource/deploymentresource/utils/stopped_resource.go deleted file mode 100644 index beedaf9b7..000000000 --- a/ec/ecresource/deploymentresource/utils/stopped_resource.go +++ /dev/null @@ -1,50 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package utils - -import "github.com/elastic/cloud-sdk-go/pkg/models" - -// IsApmResourceStopped returns true if the resource is stopped. -func IsApmResourceStopped(res *models.ApmResourceInfo) bool { - return res == nil || res.Info == nil || res.Info.Status == nil || - *res.Info.Status == "stopped" -} - -// IsIntegrationsServerResourceStopped returns true if the resource is stopped. -func IsIntegrationsServerResourceStopped(res *models.IntegrationsServerResourceInfo) bool { - return res == nil || res.Info == nil || res.Info.Status == nil || - *res.Info.Status == "stopped" -} - -// IsEsResourceStopped returns true if the resource is stopped. 
-func IsEsResourceStopped(res *models.ElasticsearchResourceInfo) bool { - return res == nil || res.Info == nil || res.Info.Status == nil || - *res.Info.Status == "stopped" -} - -// IsEssResourceStopped returns true if the resource is stopped. -func IsEssResourceStopped(res *models.EnterpriseSearchResourceInfo) bool { - return res == nil || res.Info == nil || res.Info.Status == nil || - *res.Info.Status == "stopped" -} - -// IsKibanaResourceStopped returns true if the resource is stopped. -func IsKibanaResourceStopped(res *models.KibanaResourceInfo) bool { - return res == nil || res.Info == nil || res.Info.Status == nil || - *res.Info.Status == "stopped" -} diff --git a/ec/ecresource/deploymentresource/utils/stopped_resource_test.go b/ec/ecresource/deploymentresource/utils/stopped_resource_test.go deleted file mode 100644 index da0017519..000000000 --- a/ec/ecresource/deploymentresource/utils/stopped_resource_test.go +++ /dev/null @@ -1,155 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package utils - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" -) - -func Test_IsApmResourceStopped(t *testing.T) { - type args struct { - res *models.ApmResourceInfo - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "started resource returns false", - args: args{res: &models.ApmResourceInfo{Info: &models.ApmInfo{ - Status: ec.String("started"), - }}}, - want: false, - }, - { - name: "stopped resource returns true", - args: args{res: &models.ApmResourceInfo{Info: &models.ApmInfo{ - Status: ec.String("stopped"), - }}}, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := IsApmResourceStopped(tt.args.res) - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_IsEsResourceStopped(t *testing.T) { - type args struct { - res *models.ElasticsearchResourceInfo - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "started resource returns false", - args: args{res: &models.ElasticsearchResourceInfo{Info: &models.ElasticsearchClusterInfo{ - Status: ec.String("started"), - }}}, - want: false, - }, - { - name: "stopped resource returns true", - args: args{res: &models.ElasticsearchResourceInfo{Info: &models.ElasticsearchClusterInfo{ - Status: ec.String("stopped"), - }}}, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := IsEsResourceStopped(tt.args.res) - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_IsEssResourceStopped(t *testing.T) { - type args struct { - res *models.EnterpriseSearchResourceInfo - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "started resource returns false", - args: args{res: &models.EnterpriseSearchResourceInfo{Info: &models.EnterpriseSearchInfo{ - Status: ec.String("started"), - }}}, - want: false, - }, - { - name: "stopped resource returns true", 
- args: args{res: &models.EnterpriseSearchResourceInfo{Info: &models.EnterpriseSearchInfo{ - Status: ec.String("stopped"), - }}}, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := IsEssResourceStopped(tt.args.res) - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_IsKibanaResourceStopped(t *testing.T) { - type args struct { - res *models.KibanaResourceInfo - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "started resource returns false", - args: args{res: &models.KibanaResourceInfo{Info: &models.KibanaClusterInfo{ - Status: ec.String("started"), - }}}, - want: false, - }, - { - name: "stopped resource returns true", - args: args{res: &models.KibanaResourceInfo{Info: &models.KibanaClusterInfo{ - Status: ec.String("stopped"), - }}}, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := IsKibanaResourceStopped(tt.args.res) - assert.Equal(t, tt.want, got) - }) - } -} From a2ce9d4e8681505e70f7188429199f6e2374ca07 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 28 Dec 2022 17:01:58 +0100 Subject: [PATCH 061/104] unit tests for UseNodeRoles --- .../elasticsearch/v2/elasticsearch_payload.go | 81 +---------- .../v2/elasticsearch_payload_test.go | 39 +++++ .../elasticsearch/v2/node_roles.go | 86 +++++++++++ .../elasticsearch/v2/node_roles_test.go | 135 ++++++++++++++++++ 4 files changed, 267 insertions(+), 74 deletions(-) create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go index 5d0931cc9..dbec5d0ed 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go +++ 
b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go @@ -19,16 +19,14 @@ package v2 import ( "context" - "fmt" "strings" - "github.com/blang/semver" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" + "golang.org/x/exp/slices" ) type ElasticsearchTF struct { @@ -190,19 +188,13 @@ func updateNodeRolesOnDedicatedTiers(topologies []*models.ElasticsearchClusterTo } func removeItemFromSlice(slice []string, item string) []string { - var hasItem bool - var itemIndex int - for i, str := range slice { - if str == item { - hasItem = true - itemIndex = i - } - } - if hasItem { - copy(slice[itemIndex:], slice[itemIndex+1:]) - return slice[:len(slice)-1] + i := slices.Index(slice, item) + + if i == -1 { + return slice } - return slice + + return slices.Delete(slice, i, i+1) } func dedicatedTopoogies(topologies []*models.ElasticsearchClusterTopologyElement) (dataTier *models.ElasticsearchClusterTopologyElement, hasMasterTier, hasIngestTier bool) { @@ -301,62 +293,3 @@ func EnrichElasticsearchTemplate(tpl *models.ElasticsearchPayload, templateId, v return tpl } - -func CompatibleWithNodeRoles(version string) (bool, error) { - deploymentVersion, err := semver.Parse(version) - if err != nil { - return false, fmt.Errorf("failed to parse Elasticsearch version: %w", err) - } - - return deploymentVersion.GE(utils.DataTiersVersion), nil -} - -func UseNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostics) { - - useNodeRoles, err := CompatibleWithNodeRoles(planVersion.Value) - - if err != nil { - var diags diag.Diagnostics - diags.AddError("Failed to determine whether to use node_roles", err.Error()) - return false, diags - } - - convertLegacy, diags := 
legacyToNodeRoles(stateVersion, planVersion) - - if diags.HasError() { - return false, diags - } - - return useNodeRoles && convertLegacy, nil -} - -// legacyToNodeRoles returns true when the legacy "node_type_*" should be -// migrated over to node_roles. Which will be true when: -// * The version field doesn't change. -// * The version field changes but: -// - The Elasticsearch.0.toplogy doesn't have any node_type_* set. -func legacyToNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostics) { - if stateVersion.Value == "" || stateVersion.Value == planVersion.Value { - return true, nil - } - - var diags diag.Diagnostics - oldVersion, err := semver.Parse(stateVersion.Value) - if err != nil { - diags.AddError("failed to parse previous Elasticsearch version", err.Error()) - return false, diags - } - newVersion, err := semver.Parse(planVersion.Value) - if err != nil { - diags.AddError("failed to parse new Elasticsearch version", err.Error()) - return false, diags - } - - // if the version change moves from non-node_roles to one - // that supports node roles, do not migrate on that step. 
- if oldVersion.LT(utils.DataTiersVersion) && newVersion.GE(utils.DataTiersVersion) { - return false, nil - } - - return true, nil -} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go index da5d7ba5d..c2d038cec 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go @@ -1941,3 +1941,42 @@ func Test_writeElasticsearch(t *testing.T) { }) } } + +func Test_removeItemFromSlice(t *testing.T) { + type args struct { + slice []string + item string + } + + tests := []struct { + name string + args args + expected []string + }{ + { + name: "it should NOT modify slice if the slice doesn't contain item", + args: args{ + slice: []string{"second"}, + item: "first", + }, + expected: []string{"second"}, + }, + + { + name: "it should remove an item from slice if the slice contains it", + args: args{ + slice: []string{"first", "second"}, + item: "first", + }, + expected: []string{"second"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := removeItemFromSlice(tt.args.slice, tt.args.item) + + assert.Equal(t, tt.expected, got) + }) + } +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go new file mode 100644 index 000000000..25a658094 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "fmt" + + "github.com/blang/semver" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func CompatibleWithNodeRoles(version string) (bool, error) { + deploymentVersion, err := semver.Parse(version) + if err != nil { + return false, fmt.Errorf("failed to parse Elasticsearch version: %w", err) + } + + return deploymentVersion.GE(utils.DataTiersVersion), nil +} + +func UseNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostics) { + + useNodeRoles, err := CompatibleWithNodeRoles(planVersion.Value) + + if err != nil { + var diags diag.Diagnostics + diags.AddError("Failed to determine whether to use node_roles", err.Error()) + return false, diags + } + + convertLegacy, diags := legacyToNodeRoles(stateVersion, planVersion) + + if diags.HasError() { + return false, diags + } + + return useNodeRoles && convertLegacy, nil +} + +// legacyToNodeRoles returns true when the legacy "node_type_*" should be +// migrated over to node_roles. Which will be true when: +// * The version field doesn't change. +// * The version field changes but: +// - The Elasticsearch.0.toplogy doesn't have any node_type_* set. 
+func legacyToNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostics) { + if stateVersion.Value == "" || stateVersion.Value == planVersion.Value { + return true, nil + } + + var diags diag.Diagnostics + oldVersion, err := semver.Parse(stateVersion.Value) + if err != nil { + diags.AddError("Failed to parse previous Elasticsearch version", err.Error()) + return false, diags + } + newVersion, err := semver.Parse(planVersion.Value) + if err != nil { + diags.AddError("Failed to parse new Elasticsearch version", err.Error()) + return false, diags + } + + // if the version change moves from non-node_roles to one + // that supports node roles, do not migrate on that step. + if oldVersion.LT(utils.DataTiersVersion) && newVersion.GE(utils.DataTiersVersion) { + return false, nil + } + + return true, nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go new file mode 100644 index 000000000..08f5aa88e --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go @@ -0,0 +1,135 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" +) + +func Test_UseNodeRoles(t *testing.T) { + type args struct { + stateVersion string + planVersion string + } + tests := []struct { + name string + args args + expected bool + expectedDiags diag.Diagnostics + }{ + { + name: "it should fail when plan version is invalid", + args: args{ + stateVersion: "7.0.0", + planVersion: "invalid_plan_version", + }, + expected: true, + expectedDiags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("Failed to determine whether to use node_roles", "failed to parse Elasticsearch version: No Major.Minor.Patch elements found") + return diags + }(), + }, + + { + name: "it should fail when state version is invalid", + args: args{ + stateVersion: "invalid.state.version", + planVersion: "7.0.0", + }, + expected: true, + expectedDiags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("Failed to parse previous Elasticsearch version", `Invalid character(s) found in major number "invalid"`) + return diags + }(), + }, + + { + name: "it should instruct to use node_types if both version are prior to 7.10.0", + args: args{ + stateVersion: "7.9.0", + planVersion: "7.9.1", + }, + expected: false, + }, + + { + name: "it should instruct to use node_types if plan version is 7.10.0 and state version is prior to 7.10.0", + args: args{ + stateVersion: "7.9.0", + planVersion: "7.10.0", + }, + expected: false, + }, + + { + name: "it should instruct to use node_types if plan version is after 7.10.0 and state version is prior to 7.10.0", + args: args{ + stateVersion: "7.9.2", + planVersion: "7.10.1", + }, + expected: false, + }, + + { + name: "it should instruct to use node_types if plan version is after 7.10.0 and state version is prior to 7.10.0", + args: args{ + stateVersion: "7.9.2", + planVersion: "7.10.1", + }, + 
expected: false, + }, + + { + name: "it should instruct to use node_roles if plan version is equal to state version and both is 7.10.0", + args: args{ + stateVersion: "7.10.0", + planVersion: "7.10.0", + }, + expected: true, + }, + + { + name: "it should instruct to use node_roles if plan version is equal to state version and both is after 7.10.0", + args: args{ + stateVersion: "7.10.2", + planVersion: "7.10.2", + }, + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, diags := UseNodeRoles(types.String{Value: tt.args.stateVersion}, types.String{Value: tt.args.planVersion}) + + if tt.expectedDiags == nil { + assert.Nil(t, diags) + assert.Equal(t, tt.expected, got) + } else { + assert.Equal(t, tt.expectedDiags, diags) + } + + }) + } +} From 663167fa149197caa73afe4ccf08562add163e9e Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Fri, 30 Dec 2022 21:12:55 +0100 Subject: [PATCH 062/104] (WIP) Bring back logic of migrating node_types to node_roles --- .../v2/deployment_update_payload.go | 2 +- .../v2/deployment_update_payload_test.go | 103 +++++++++++++++++ .../elasticsearch/v2/elasticsearch_payload.go | 50 +++++---- .../v2/elasticsearch_topology.go | 2 +- .../elasticsearch/v2/node_roles.go | 104 +++++++++++++++++- .../v2/node_roles_plan_modifier.go | 58 ++-------- .../elasticsearch/v2/node_roles_test.go | 5 +- .../v2/node_types_plan_modifier.go | 56 ++-------- 8 files changed, 262 insertions(+), 118 deletions(-) diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload.go index 9dab746db..6c7695960 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload.go @@ -70,7 +70,7 @@ func (plan DeploymentTF) UpdateRequest(ctx context.Context, client *api.API, sta // This might not be necessary going forward 
as we move to // tiered Elasticsearch nodes. - useNodeRoles, diags := elasticsearchv2.UseNodeRoles(state.Version, plan.Version) + useNodeRoles, diags := elasticsearchv2.UseNodeRoles(ctx, state.Version, plan.Version, plan.Elasticsearch) if diags.HasError() { return nil, diags diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go index 5056f7651..575ec4a05 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go @@ -1300,6 +1300,109 @@ func Test_updateResourceToModel(t *testing.T) { }, }, + { + name: "does not migrate node_type to node_role on version upgrade that's higher than 7.10.0", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.11.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + }, + ), + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + HotTier: elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + }, + ), + }, + }, + client: 
api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.11.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(16384), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + Ml: ec.Bool(false), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + { name: "migrates node_type to node_role when the existing topology element size is updated", args: args{ diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go 
b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go index dbec5d0ed..2aa98d95c 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go @@ -125,31 +125,43 @@ func (es *ElasticsearchTF) payload(ctx context.Context, res *models.Elasticsearc return res, diags } -func (es *ElasticsearchTF) topologiesPayload(ctx context.Context, topologies []*models.ElasticsearchClusterTopologyElement) diag.Diagnostics { - var diags diag.Diagnostics +func (es *ElasticsearchTF) topologyObjects() map[string]types.Object { + return map[string]types.Object{ + "hot_content": es.HotContentTier, + "warm": es.WarmTier, + "cold": es.ColdTier, + "frozen": es.FrozenTier, + "ml": es.MlTier, + "master": es.MasterTier, + "coordinating": es.CoordinatingTier, + } +} - diags.Append(topologyPayload(ctx, es.HotContentTier, "hot_content", topologies)...) - diags.Append(topologyPayload(ctx, es.CoordinatingTier, "coordinating", topologies)...) - diags.Append(topologyPayload(ctx, es.MasterTier, "master", topologies)...) - diags.Append(topologyPayload(ctx, es.WarmTier, "warm", topologies)...) - diags.Append(topologyPayload(ctx, es.ColdTier, "cold", topologies)...) - diags.Append(topologyPayload(ctx, es.FrozenTier, "frozen", topologies)...) - diags.Append(topologyPayload(ctx, es.MlTier, "ml", topologies)...) 
+func (es *ElasticsearchTF) topologies(ctx context.Context) (map[string]*ElasticsearchTopologyTF, diag.Diagnostics) { + var diagnostics diag.Diagnostics - return diags -} + tierObjects := es.topologyObjects() + res := make(map[string]*ElasticsearchTopologyTF, len(tierObjects)) -func topologyPayload(ctx context.Context, topologyObj types.Object, id string, topologies []*models.ElasticsearchClusterTopologyElement) diag.Diagnostics { - var diags diag.Diagnostics + for topologyId, topologyObject := range tierObjects { + tier, diags := objectToTopology(ctx, topologyObject) + diagnostics.Append(diags...) + res[topologyId] = tier + } + + return res, diagnostics +} - if !topologyObj.IsNull() && !topologyObj.IsUnknown() { - var topology ElasticsearchTopologyTF +func (es *ElasticsearchTF) topologiesPayload(ctx context.Context, topologyModels []*models.ElasticsearchClusterTopologyElement) diag.Diagnostics { + tiers, diags := es.topologies(ctx) - ds := tfsdk.ValueAs(ctx, topologyObj, &topology) - diags.Append(ds...) + if diags.HasError() { + return diags + } - if !ds.HasError() { - diags.Append(topology.payload(ctx, id, topologies)...) + for tierId, tier := range tiers { + if tier != nil { + diags.Append(tier.payload(ctx, tierId, topologyModels)...) 
} } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go index ce795c78c..83201c263 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go @@ -242,7 +242,7 @@ func (topology *ElasticsearchTopologyTF) HasNodeType() bool { topology.NodeTypeMl.Value != "" } -func ObjectToTopology(ctx context.Context, obj types.Object) (*ElasticsearchTopologyTF, diag.Diagnostics) { +func objectToTopology(ctx context.Context, obj types.Object) (*ElasticsearchTopologyTF, diag.Diagnostics) { if obj.IsNull() || obj.IsUnknown() { return nil, nil } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go index 25a658094..3c9497325 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go @@ -18,11 +18,14 @@ package v2 import ( + "context" "fmt" "github.com/blang/semver" "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" ) @@ -35,7 +38,7 @@ func CompatibleWithNodeRoles(version string) (bool, error) { return deploymentVersion.GE(utils.DataTiersVersion), nil } -func UseNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostics) { +func UseNodeRoles(ctx context.Context, stateVersion, planVersion types.String, planElasticsearch types.Object) (bool, diag.Diagnostics) { useNodeRoles, err := CompatibleWithNodeRoles(planVersion.Value) @@ -45,7 +48,7 @@ func UseNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostic 
return false, diags } - convertLegacy, diags := legacyToNodeRoles(stateVersion, planVersion) + convertLegacy, diags := legacyToNodeRoles(ctx, stateVersion, planVersion, planElasticsearch) if diags.HasError() { return false, diags @@ -59,7 +62,7 @@ func UseNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostic // * The version field doesn't change. // * The version field changes but: // - The Elasticsearch.0.toplogy doesn't have any node_type_* set. -func legacyToNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagnostics) { +func legacyToNodeRoles(ctx context.Context, stateVersion, planVersion types.String, planElasticsearch types.Object) (bool, diag.Diagnostics) { if stateVersion.Value == "" || stateVersion.Value == planVersion.Value { return true, nil } @@ -82,5 +85,100 @@ func legacyToNodeRoles(stateVersion, planVersion types.String) (bool, diag.Diagn return false, nil } + // When any topology elements in the state have the node_type_* + // properties set, the node_role field cannot be used, since + // we'd be changing the version AND migrating over `node_role`s + // which is not permitted by the API. 
+ + var es *ElasticsearchTF + + if diags := tfsdk.ValueAs(ctx, planElasticsearch, &es); diags.HasError() { + return false, diags + } + + if es == nil { + diags.AddError("Cannot migrate node types to node roles", "cannot find elasticsearch object") + return false, diags + } + + tiers, diags := es.topologies(ctx) + + if diags.HasError() { + return false, diags + } + + for _, tier := range tiers { + if tier != nil && tier.HasNodeType() { + return false, nil + } + } + return true, nil } + +func useStateAndNodeRolesInPlanModifiers(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) (useState, useNodeRoles bool) { + if req.AttributeState == nil || resp.AttributePlan == nil || req.AttributeConfig == nil { + return false, false + } + + if !resp.AttributePlan.IsUnknown() { + return false, false + } + + // if the config is the unknown value, use the unknown value otherwise, interpolation gets messed up + if req.AttributeConfig.IsUnknown() { + return false, false + } + + // if there is no state for "version" return + var stateVersion types.String + + if diags := req.State.GetAttribute(ctx, path.Root("version"), &stateVersion); diags.HasError() { + resp.Diagnostics.Append(diags...) + return false, false + } + + if stateVersion.IsNull() { + return false, false + } + + // if template changed return + templateChanged, diags := isAttributeChanged(ctx, path.Root("deployment_template_id"), req) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return false, false + } + + if templateChanged { + return false, false + } + + // get version for plan and state and calculate useNodeRoles + + var planVersion types.String + + if diags := req.Plan.GetAttribute(ctx, path.Root("version"), &planVersion); diags.HasError() { + resp.Diagnostics.Append(diags...) 
+ return false, false + } + + var elasticsearch types.Object + + if diags := req.Plan.GetAttribute(ctx, path.Root("elasticsearch"), &elasticsearch); diags.HasError() { + resp.Diagnostics.Append(diags...) + return false, false + } + + useNodeRoles, diags = UseNodeRoles(ctx, stateVersion, planVersion, elasticsearch) + + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return false, false + } + + useState = true + + return +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go index 96b843e6a..10276b37f 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go @@ -20,9 +20,7 @@ package v2 import ( "context" - "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/tfsdk" - "github.com/hashicorp/terraform-plugin-framework/types" ) func UseNodeRolesDefault() tfsdk.AttributePlanModifier { @@ -32,60 +30,28 @@ func UseNodeRolesDefault() tfsdk.AttributePlanModifier { type nodeRolesDefault struct{} func (r nodeRolesDefault) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { - if req.AttributeState == nil || resp.AttributePlan == nil || req.AttributeConfig == nil { - return - } - - if !resp.AttributePlan.IsUnknown() { - return - } - - // if the config is the unknown value, use the unknown value otherwise, interpolation gets messed up - if req.AttributeConfig.IsUnknown() { - return - } - - // if there is no state for "version" return - var stateVersion types.String + useState, useNodeRoles := useStateAndNodeRolesInPlanModifiers(ctx, req, resp) - if diags := req.State.GetAttribute(ctx, path.Root("version"), &stateVersion); diags.HasError() { - resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { return } - if stateVersion.IsNull() { + if !useState { return } - // if template changed return - templateChanged, diags := isAttributeChanged(ctx, path.Root("deployment_template_id"), req) - - resp.Diagnostics.Append(diags...) - - if diags.HasError() { - return - } - - if templateChanged { - return - } - - // get version for plan and state and calculate useNodeRoles - - var planVersion types.String - - if diags := req.Plan.GetAttribute(ctx, path.Root("version"), &planVersion); diags.HasError() { - resp.Diagnostics.Append(diags...) - return - } - - useNodeRoles, diags := UseNodeRoles(stateVersion, planVersion) - - if diags.HasError() { - resp.Diagnostics.Append(diags...) + // If useNodeRoles is false, we can use the current state and + // it should be null in this case - we don't migrate back from node_roles to node_types + if !useNodeRoles && !req.AttributeState.IsNull() { + // it should not happen return } + // If useNodeRoles is true, then there is either + // * state already uses node_roles or + // * state uses node_types but we need to migrate to node_roles. + // We cannot use state in the second case (migration to node_roles) + // It happens when the attribute state is null.
if useNodeRoles && req.AttributeState.IsNull() { return } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go index 08f5aa88e..e33cb6a18 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go @@ -17,7 +17,9 @@ package v2 +/* import ( + "context" "testing" "github.com/hashicorp/terraform-plugin-framework/diag" @@ -121,7 +123,7 @@ func Test_UseNodeRoles(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, diags := UseNodeRoles(types.String{Value: tt.args.stateVersion}, types.String{Value: tt.args.planVersion}) + got, diags := UseNodeRoles(context.Background(), types.String{Value: tt.args.stateVersion}, types.String{Value: tt.args.planVersion}) if tt.expectedDiags == nil { assert.Nil(t, diags) @@ -133,3 +135,4 @@ func Test_UseNodeRoles(t *testing.T) { }) } } +*/ diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go index d55529747..96dec3b86 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go @@ -24,7 +24,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/tfsdk" - "github.com/hashicorp/terraform-plugin-framework/types" ) // Use `self` as value of `observability`'s `deployment_id` attribute @@ -35,60 +34,23 @@ func UseNodeTypesDefault() tfsdk.AttributePlanModifier { type nodeTypesDefault struct{} func (r nodeTypesDefault) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { - if req.AttributeState == nil || resp.AttributePlan 
== nil || req.AttributeConfig == nil { - return - } + useState, useNodeRoles := useStateAndNodeRolesInPlanModifiers(ctx, req, resp) - if !resp.AttributePlan.IsUnknown() { + if resp.Diagnostics.HasError() { return } - // if the config is the unknown value, use the unknown value otherwise, interpolation gets messed up - if req.AttributeConfig.IsUnknown() { + if !useState { return } - // if there is no state for "version" return - var stateVersion types.String - - if diags := req.State.GetAttribute(ctx, path.Root("version"), &stateVersion); diags.HasError() { - resp.Diagnostics.Append(diags...) - return - } - - if stateVersion.IsNull() { - return - } - - // if template changed return - templateChanged, diags := isAttributeChanged(ctx, path.Root("deployment_template_id"), req) - - resp.Diagnostics.Append(diags...) - - if diags.HasError() { - return - } - - if templateChanged { - return - } - - // get version for plan and state and calculate useNodeRoles - - var planVersion types.String - - if diags := req.Plan.GetAttribute(ctx, path.Root("version"), &planVersion); diags.HasError() { - resp.Diagnostics.Append(diags...) - return - } - - useNodeRoles, diags := UseNodeRoles(stateVersion, planVersion) - - if diags.HasError() { - resp.Diagnostics.Append(diags...) - return - } + // If useNodeRoles is false, we can use the current state + // If useNodeRoles is true, then there is either + // * state already uses node_roles or + // * state uses node_types but we need to migrate to node_roles. + // We cannot use state in the second case (migration to node_roles) + // It happens when the attribute state is not null.
if useNodeRoles && !req.AttributeState.IsNull() { return } From f0ca456d88336c6f885e3c5db1e2d7259c8b703f Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 3 Jan 2023 18:45:16 +0100 Subject: [PATCH 063/104] Improve node_type detection --- .../elasticsearch/v2/elasticsearch_topology.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go index 83201c263..688e9fab4 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go @@ -236,10 +236,12 @@ func (topology *ElasticsearchTopologyTF) parseLegacyNodeType(nodeType *models.El } func (topology *ElasticsearchTopologyTF) HasNodeType() bool { - return topology.NodeTypeData.Value != "" || - topology.NodeTypeIngest.Value != "" || - topology.NodeTypeMaster.Value != "" || - topology.NodeTypeMl.Value != "" + for _, nodeType := range []types.String{topology.NodeTypeData, topology.NodeTypeIngest, topology.NodeTypeMaster, topology.NodeTypeMl} { + if !nodeType.IsUnknown() && !nodeType.IsNull() && nodeType.Value != "" { + return true + } + } + return false } func objectToTopology(ctx context.Context, obj types.Object) (*ElasticsearchTopologyTF, diag.Diagnostics) { From 8f14a40c88b8d87f4ac65e5d43f34c117f12bd5a Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 4 Jan 2023 10:56:36 +0100 Subject: [PATCH 064/104] improve and rearrange node_types plan modifier --- .../elasticsearch/v2/node_roles.go | 17 +++++++++++++ .../v2/node_types_plan_modifier.go | 24 ++++--------------- 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go index 3c9497325..ec75123d1 100644 --- 
a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go @@ -23,6 +23,7 @@ import ( "github.com/blang/semver" "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/tfsdk" @@ -182,3 +183,19 @@ func useStateAndNodeRolesInPlanModifiers(ctx context.Context, req tfsdk.ModifyAt return } + +func isAttributeChanged(ctx context.Context, p path.Path, req tfsdk.ModifyAttributePlanRequest) (bool, diag.Diagnostics) { + var planValue attr.Value + + if diags := req.Plan.GetAttribute(ctx, p, &planValue); diags.HasError() { + return false, diags + } + + var stateValue attr.Value + + if diags := req.State.GetAttribute(ctx, p, &stateValue); diags.HasError() { + return false, diags + } + + return !planValue.Equal(stateValue), nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go index 96dec3b86..fcfb3f53c 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go @@ -20,9 +20,6 @@ package v2 import ( "context" - "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/tfsdk" ) @@ -44,7 +41,10 @@ func (r nodeTypesDefault) Modify(ctx context.Context, req tfsdk.ModifyAttributeP return } - // If useNodeRoles is false, we can use the current state + // If useNodeRoles is false, we can use the current state if it's not null + if !useNodeRoles && req.AttributeState.IsNull() { + 
return + } // If useNodeRoles is true, then there is either // * state already uses node_roles or @@ -67,19 +67,3 @@ func (r nodeTypesDefault) Description(ctx context.Context) string { func (r nodeTypesDefault) MarkdownDescription(ctx context.Context) string { return "Use current state if it's still valid." } - -func isAttributeChanged(ctx context.Context, p path.Path, req tfsdk.ModifyAttributePlanRequest) (bool, diag.Diagnostics) { - var planValue attr.Value - - if diags := req.Plan.GetAttribute(ctx, p, &planValue); diags.HasError() { - return false, diags - } - - var stateValue attr.Value - - if diags := req.State.GetAttribute(ctx, p, &stateValue); diags.HasError() { - return false, diags - } - - return !planValue.Equal(stateValue), nil -} From 0e50ae7b43938b7b5071fa49456709574af354e6 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 4 Jan 2023 12:59:57 +0100 Subject: [PATCH 065/104] enable test for UseNodeRoles + minor refactoring --- .../elasticsearch/v2/node_roles.go | 4 +-- .../elasticsearch/v2/node_roles_test.go | 36 ++++++++++++++++--- 2 files changed, 32 insertions(+), 8 deletions(-) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go index ec75123d1..f42de02ce 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go @@ -179,9 +179,7 @@ func useStateAndNodeRolesInPlanModifiers(ctx context.Context, req tfsdk.ModifyAt return false, false } - useState = true - - return + return true, useNodeRoles } func isAttributeChanged(ctx context.Context, p path.Path, req tfsdk.ModifyAttributePlanRequest) (bool, diag.Diagnostics) { diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go index e33cb6a18..36a951014 100644 --- 
a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go @@ -17,20 +17,22 @@ package v2 -/* import ( "context" "testing" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" ) func Test_UseNodeRoles(t *testing.T) { type args struct { - stateVersion string - planVersion string + stateVersion string + planVersion string + elasticsearch Elasticsearch } tests := []struct { name string @@ -38,6 +40,7 @@ func Test_UseNodeRoles(t *testing.T) { expected bool expectedDiags diag.Diagnostics }{ + { name: "it should fail when plan version is invalid", args: args{ @@ -119,11 +122,35 @@ func Test_UseNodeRoles(t *testing.T) { }, expected: true, }, + + { + name: "it should instruct to use node_types if both plan version and state version are after 7.10.0 and plan uses node_types", + args: args{ + stateVersion: "7.11.1", + planVersion: "7.12.0", + elasticsearch: Elasticsearch{ + HotTier: &ElasticsearchTopology{ + id: "hot_content", + NodeTypeData: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMl: ec.String("false"), + }, + }, + }, + expected: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, diags := UseNodeRoles(context.Background(), types.String{Value: tt.args.stateVersion}, types.String{Value: tt.args.planVersion}) + var elasticsearchObject types.Object + + diags := tfsdk.ValueFrom(context.Background(), tt.args.elasticsearch, ElasticsearchSchema().FrameworkType(), &elasticsearchObject) + + assert.Nil(t, diags) + + got, diags := UseNodeRoles(context.Background(), types.String{Value: tt.args.stateVersion}, types.String{Value: tt.args.planVersion}, elasticsearchObject) if tt.expectedDiags == nil { 
assert.Nil(t, diags) @@ -135,4 +162,3 @@ func Test_UseNodeRoles(t *testing.T) { }) } } -*/ From e11db61b5b385322a170aaea9633c998962a8eba Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 4 Jan 2023 19:16:09 +0100 Subject: [PATCH 066/104] unit tests for node_types and node_roles modifiers --- .../elasticsearch/v2/node_roles.go | 10 +- .../v2/node_roles_plan_modifier.go | 2 +- .../v2/node_roles_plan_modifier_test.go | 241 ++++++++++++++++++ .../elasticsearch/v2/node_roles_test.go | 2 +- .../v2/node_types_plan_modifier.go | 3 +- .../v2/node_types_plan_modifier_test.go | 208 +++++++++++++++ 6 files changed, 459 insertions(+), 7 deletions(-) create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier_test.go create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier_test.go diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go index f42de02ce..0d5bc6002 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go @@ -40,8 +40,7 @@ func CompatibleWithNodeRoles(version string) (bool, error) { } func UseNodeRoles(ctx context.Context, stateVersion, planVersion types.String, planElasticsearch types.Object) (bool, diag.Diagnostics) { - - useNodeRoles, err := CompatibleWithNodeRoles(planVersion.Value) + compatibleWithNodeRoles, err := CompatibleWithNodeRoles(planVersion.Value) if err != nil { var diags diag.Diagnostics @@ -49,13 +48,17 @@ func UseNodeRoles(ctx context.Context, stateVersion, planVersion types.String, p return false, diags } + if !compatibleWithNodeRoles { + return false, nil + } + convertLegacy, diags := legacyToNodeRoles(ctx, stateVersion, planVersion, planElasticsearch) if diags.HasError() { return false, diags } - return useNodeRoles && convertLegacy, nil + return convertLegacy, nil } // legacyToNodeRoles 
returns true when the legacy "node_type_*" should be @@ -127,6 +130,7 @@ func useStateAndNodeRolesInPlanModifiers(ctx context.Context, req tfsdk.ModifyAt } // if the config is the unknown value, use the unknown value otherwise, interpolation gets messed up + // it's the precaution taken from the Framework's `UseStateForUnknown` plan modifier if req.AttributeConfig.IsUnknown() { return false, false } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go index 10276b37f..930069ea2 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go @@ -29,7 +29,7 @@ func UseNodeRolesDefault() tfsdk.AttributePlanModifier { type nodeRolesDefault struct{} -func (r nodeRolesDefault) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { +func (m nodeRolesDefault) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { useState, useNodeRoles := useStateAndNodeRolesInPlanModifiers(ctx, req, resp) if resp.Diagnostics.HasError() { diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier_test.go new file mode 100644 index 000000000..fdff82c80 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier_test.go @@ -0,0 +1,241 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2_test + +import ( + "context" + "testing" + + deploymentv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" +) + +func Test_nodeRolesPlanModifier(t *testing.T) { + type args struct { + attributeState []string + attributePlan []string + deploymentState *deploymentv2.Deployment + deploymentPlan deploymentv2.Deployment + } + tests := []struct { + name string + args args + expectedDiags diag.Diagnostics + expected []string + expectedUnknown bool + }{ + { + name: "it should keep current plan value if it's defined", + args: args{ + attributePlan: []string{ + "data_content", + "data_hot", + "ingest", + "master", + }, + }, + expected: []string{ + "data_content", + "data_hot", + "ingest", + "master", + }, + }, + + { + name: "it should not use state if state doesn't have `version`", + args: args{}, + expectedUnknown: true, + }, + + { + name: "it should not use state if plan changed deployment template`", + args: args{ + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-storage-optimized-v3", + }, + }, + expectedUnknown: true, 
+ }, + + { + name: "it should not use state if plan version is less than 7.10.0 but the attribute state is not null`", + args: args{ + attributeState: []string{"data_hot"}, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.9.0", + }, + }, + expectedUnknown: true, + }, + + { + name: "it should not use state if plan version is changed over 7.10.0 and the attribute state is not null`", + args: args{ + attributeState: []string{"data_hot"}, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.9.0", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.1", + }, + }, + expectedUnknown: true, + }, + + { + name: "it should use state if plan version is changed over 7.10.0 and the attribute state is null`", + args: args{ + attributeState: nil, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.9.0", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.1", + }, + }, + expected: nil, + }, + + { + name: "it should use state if both plan and state versions is or higher than 7.10.0 and the attribute state is not null`", + args: args{ + attributeState: []string{"data_hot"}, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.0", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.0", + }, + }, + expected: []string{"data_hot"}, + }, + + { + name: "it should not use state if both plan and state versions is or higher than 7.10.0 and the attribute state is null`", + args: args{ + attributeState: nil, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: 
"7.10.0", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.0", + }, + }, + expectedUnknown: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + modifier := v2.UseNodeRolesDefault() + + // attributeConfig value is not used in the plan modifer + // it just should be known + var attributeConfigValue attr.Value + diags := tfsdk.ValueFrom(context.Background(), []string{}, types.SetType{ElemType: types.StringType}, &attributeConfigValue) + assert.Nil(t, diags) + + var attributeStateValue attr.Value + diags = tfsdk.ValueFrom(context.Background(), tt.args.attributeState, types.SetType{ElemType: types.StringType}, &attributeStateValue) + assert.Nil(t, diags) + + var deploymentStateObject types.Object + diags = tfsdk.ValueFrom(context.Background(), tt.args.deploymentState, deploymentv2.DeploymentSchema().Type(), &deploymentStateObject) + assert.Nil(t, diags) + + deploymentStateValue, err := deploymentStateObject.ToTerraformValue(context.Background()) + assert.Nil(t, err) + + var deploymentPlanObject types.Object + diags = tfsdk.ValueFrom(context.Background(), tt.args.deploymentPlan, deploymentv2.DeploymentSchema().Type(), &deploymentPlanObject) + assert.Nil(t, diags) + + deploymentPlanValue, err := deploymentPlanObject.ToTerraformValue(context.Background()) + assert.Nil(t, err) + + req := tfsdk.ModifyAttributePlanRequest{ + AttributeConfig: attributeConfigValue, + AttributeState: attributeStateValue, + State: tfsdk.State{ + Raw: deploymentStateValue, + Schema: deploymentv2.DeploymentSchema(), + }, + Plan: tfsdk.Plan{ + Raw: deploymentPlanValue, + Schema: deploymentv2.DeploymentSchema(), + }, + } + + // the default plan value is `Unknown` ("known after apply") + // the plan modifier either keeps this value or uses the current state + // if test doesn't specify plan value, let's use the default (`Unknown`) value that is used by TF during plan modifier execution + var 
attributePlanValue attr.Value + if tt.args.attributePlan == nil { + diags = tfsdk.ValueFrom(context.Background(), types.Set{Unknown: true, ElemType: types.StringType}, types.SetType{ElemType: types.StringType}, &attributePlanValue) + } else { + diags = tfsdk.ValueFrom(context.Background(), tt.args.attributePlan, types.SetType{ElemType: types.StringType}, &attributePlanValue) + } + assert.Nil(t, diags) + + resp := tfsdk.ModifyAttributePlanResponse{AttributePlan: attributePlanValue} + + modifier.Modify(context.Background(), req, &resp) + + if tt.expectedDiags != nil { + assert.Equal(t, tt.expectedDiags, resp.Diagnostics) + return + } + + assert.Nil(t, resp.Diagnostics) + + if tt.expectedUnknown { + assert.True(t, resp.AttributePlan.IsUnknown(), "attributePlan should be unknown") + return + } + + var attributePlan []string + + diags = tfsdk.ValueAs(context.Background(), resp.AttributePlan, &attributePlan) + + assert.Nil(t, diags) + + assert.Equal(t, tt.expected, attributePlan) + }) + } +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go index 36a951014..932fd5dbe 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go @@ -59,7 +59,7 @@ func Test_UseNodeRoles(t *testing.T) { name: "it should fail when state version is invalid", args: args{ stateVersion: "invalid.state.version", - planVersion: "7.0.0", + planVersion: "7.10.0", }, expected: true, expectedDiags: func() diag.Diagnostics { diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go index fcfb3f53c..31eba0308 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go @@ -23,14 +23,13 
@@ import ( "github.com/hashicorp/terraform-plugin-framework/tfsdk" ) -// Use `self` as value of `observability`'s `deployment_id` attribute func UseNodeTypesDefault() tfsdk.AttributePlanModifier { return nodeTypesDefault{} } type nodeTypesDefault struct{} -func (r nodeTypesDefault) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { +func (m nodeTypesDefault) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { useState, useNodeRoles := useStateAndNodeRolesInPlanModifiers(ctx, req, resp) if resp.Diagnostics.HasError() { diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier_test.go new file mode 100644 index 000000000..d0c141ba8 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier_test.go @@ -0,0 +1,208 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2_test + +import ( + "context" + "testing" + + deploymentv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" +) + +func Test_nodeTypesPlanModifier(t *testing.T) { + type args struct { + attributeState types.String + attributePlan *types.String + deploymentState *deploymentv2.Deployment + deploymentPlan deploymentv2.Deployment + } + tests := []struct { + name string + args args + expectedDiags diag.Diagnostics + expected types.String + }{ + { + name: "it should keep current plan value if it's defined", + args: args{ + attributePlan: &types.String{Value: "some value"}, + }, + expected: types.String{Value: "some value"}, + }, + + { + name: "it should not use state if state doesn't have `version`", + args: args{}, + expected: types.String{Unknown: true}, + }, + + { + name: "it should not use state if plan changed deployment template`", + args: args{ + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-storage-optimized-v3", + }, + }, + expected: types.String{Unknown: true}, + }, + + { + name: "it should not use state if plan version is less than 7.10.0 but the attribute state is null`", + args: args{ + attributeState: types.String{Null: true}, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.9.0", + }, + }, + expected: types.String{Unknown: true}, + }, + + { + name: "it should not use state if 
plan version is changed over 7.10.0, but the attribute state is null`", + args: args{ + attributeState: types.String{Null: true}, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.9.0", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.1", + }, + }, + expected: types.String{Unknown: true}, + }, + + { + name: "it should not use state if both plan and state versions is or higher than 7.10.0, but the attribute state is not null`", + args: args{ + attributeState: types.String{Value: "false"}, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.0", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.0", + }, + }, + expected: types.String{Unknown: true}, + }, + + { + name: "it should use state if both plan and state versions is or higher than 7.10.0 and the attribute state is null`", + args: args{ + attributeState: types.String{Null: true}, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.0", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.0", + }, + }, + expected: types.String{Null: true}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + modifier := v2.UseNodeTypesDefault() + + // attributeConfig value is not used in the plan modifer + // it just should be known + var attributeConfigValue attr.Value + diags := tfsdk.ValueFrom(context.Background(), types.String{}, types.StringType, &attributeConfigValue) + assert.Nil(t, diags) + + var attributeStateValue attr.Value + diags = tfsdk.ValueFrom(context.Background(), tt.args.attributeState, types.StringType, &attributeStateValue) + assert.Nil(t, diags) + + var deploymentStateObject types.Object + diags = tfsdk.ValueFrom(context.Background(), 
tt.args.deploymentState, deploymentv2.DeploymentSchema().Type(), &deploymentStateObject) + assert.Nil(t, diags) + + deploymentStateValue, err := deploymentStateObject.ToTerraformValue(context.Background()) + assert.Nil(t, err) + + var deploymentPlanObject types.Object + diags = tfsdk.ValueFrom(context.Background(), tt.args.deploymentPlan, deploymentv2.DeploymentSchema().Type(), &deploymentPlanObject) + assert.Nil(t, diags) + + deploymentPlanValue, err := deploymentPlanObject.ToTerraformValue(context.Background()) + assert.Nil(t, err) + + req := tfsdk.ModifyAttributePlanRequest{ + AttributeConfig: attributeConfigValue, + AttributeState: attributeStateValue, + State: tfsdk.State{ + Raw: deploymentStateValue, + Schema: deploymentv2.DeploymentSchema(), + }, + Plan: tfsdk.Plan{ + Raw: deploymentPlanValue, + Schema: deploymentv2.DeploymentSchema(), + }, + } + + // the default plan value is `Unknown` ("known after apply") + // the plan modifier either keeps this value or uses the current state + // if test doesn't specify plan value, let's use the default (`Unknown`) value that is used by TF during plan modifier execution + var attributePlanValue attr.Value + if tt.args.attributePlan == nil { + tt.args.attributePlan = &types.String{Unknown: true} + } + diags = tfsdk.ValueFrom(context.Background(), tt.args.attributePlan, types.StringType, &attributePlanValue) + assert.Nil(t, diags) + + resp := tfsdk.ModifyAttributePlanResponse{AttributePlan: attributePlanValue} + + modifier.Modify(context.Background(), req, &resp) + + if tt.expectedDiags != nil { + assert.Equal(t, tt.expectedDiags, resp.Diagnostics) + } else { + assert.Nil(t, resp.Diagnostics) + + var attributePlan types.String + + diags := tfsdk.ValueAs(context.Background(), resp.AttributePlan, &attributePlan) + + assert.Nil(t, diags) + + assert.Equal(t, tt.expected, attributePlan) + } + + }) + } +} From 4800d9dba1dda73eda2e3e8a1d3ee8065cdc4430 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 5 Jan 2023 
14:08:05 +0100 Subject: [PATCH 067/104] renaming + obsolete code removal --- .../elasticsearch/v2/node_roles.go | 4 +-- .../elasticsearch/v2/schema.go | 36 +++++++++---------- .../v2/topology_plan_modifier.go | 32 +++++------------ 3 files changed, 28 insertions(+), 44 deletions(-) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go index 0d5bc6002..9db6baacf 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go @@ -148,7 +148,7 @@ func useStateAndNodeRolesInPlanModifiers(ctx context.Context, req tfsdk.ModifyAt } // if template changed return - templateChanged, diags := isAttributeChanged(ctx, path.Root("deployment_template_id"), req) + templateChanged, diags := attributeChanged(ctx, path.Root("deployment_template_id"), req) resp.Diagnostics.Append(diags...) @@ -186,7 +186,7 @@ func useStateAndNodeRolesInPlanModifiers(ctx context.Context, req tfsdk.ModifyAt return true, useNodeRoles } -func isAttributeChanged(ctx context.Context, p path.Path, req tfsdk.ModifyAttributePlanRequest) (bool, diag.Diagnostics) { +func attributeChanged(ctx context.Context, p path.Path, req tfsdk.ModifyAttributePlanRequest) (bool, diag.Diagnostics) { var planValue attr.Value if diags := req.Plan.GetAttribute(ctx, p, &planValue); diags.HasError() { diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go index d089d5137..55cf21bd1 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -96,13 +96,13 @@ func ElasticsearchSchema() tfsdk.Attribute { Computed: true, }, - "hot": ElasticsearchTierSchema("'hot' optional topology element", true, "hot"), - "coordinating": ElasticsearchTierSchema("'coordinating' optional topology element", false, 
"coordinating"), - "master": ElasticsearchTierSchema("'master' optional topology element", false, "master"), - "warm": ElasticsearchTierSchema("'warm' optional topology element", false, "warm"), - "cold": ElasticsearchTierSchema("'cold' optional topology element", false, "cold"), - "frozen": ElasticsearchTierSchema("'frozen' optional topology element", false, "frozen"), - "ml": ElasticsearchTierSchema("'ml' optional topology element", false, "ml"), + "hot": ElasticsearchTopologySchema("'hot' topology element", true, "hot"), + "coordinating": ElasticsearchTopologySchema("'coordinating' topology element", false, "coordinating"), + "master": ElasticsearchTopologySchema("'master' topology element", false, "master"), + "warm": ElasticsearchTopologySchema("'warm' topology element", false, "warm"), + "cold": ElasticsearchTopologySchema("'cold' topology element", false, "cold"), + "frozen": ElasticsearchTopologySchema("'frozen' topology element", false, "frozen"), + "ml": ElasticsearchTopologySchema("'ml' topology element", false, "ml"), "trust_account": ElasticsearchTrustAccountSchema(), @@ -168,7 +168,7 @@ func ElasticsearchConfigSchema() tfsdk.Attribute { } } -func ElasticsearchTopologyAutoscalingSchema(tierName string) tfsdk.Attribute { +func ElasticsearchTopologyAutoscalingSchema(topologyAttributeName string) tfsdk.Attribute { return tfsdk.Attribute{ Description: "Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources.", Required: true, @@ -179,7 +179,7 @@ func ElasticsearchTopologyAutoscalingSchema(tierName string) tfsdk.Attribute { Optional: true, Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ - UseTierStateForUnknown(tierName), + UseTopologyStateForUnknown(topologyAttributeName), }, }, "max_size": { @@ -188,7 +188,7 @@ func ElasticsearchTopologyAutoscalingSchema(tierName string) tfsdk.Attribute { Optional: true, Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ - UseTierStateForUnknown(tierName), + 
UseTopologyStateForUnknown(topologyAttributeName), }, }, "min_size_resource": { @@ -197,7 +197,7 @@ func ElasticsearchTopologyAutoscalingSchema(tierName string) tfsdk.Attribute { Optional: true, Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ - UseTierStateForUnknown(tierName), + UseTopologyStateForUnknown(topologyAttributeName), }, }, "min_size": { @@ -206,7 +206,7 @@ func ElasticsearchTopologyAutoscalingSchema(tierName string) tfsdk.Attribute { Optional: true, Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ - UseTierStateForUnknown(tierName), + UseTopologyStateForUnknown(topologyAttributeName), }, }, "policy_override_json": { @@ -214,7 +214,7 @@ func ElasticsearchTopologyAutoscalingSchema(tierName string) tfsdk.Attribute { Description: "Computed policy overrides set directly via the API or other clients.", Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ - UseTierStateForUnknown(tierName), + UseTopologyStateForUnknown(topologyAttributeName), }, }, }), @@ -375,7 +375,7 @@ func ElasticsearchTrustExternalSchema() tfsdk.Attribute { } } -func ElasticsearchTierSchema(description string, required bool, tierName string) tfsdk.Attribute { +func ElasticsearchTopologySchema(description string, required bool, topologyAttributeName string) tfsdk.Attribute { return tfsdk.Attribute{ Optional: !required, // it should be Computed but Computed triggers TF weird behaviour that leads to unempty plan for zero change config @@ -388,7 +388,7 @@ func ElasticsearchTierSchema(description string, required bool, tierName string) Description: `Computed Instance Configuration ID of the topology element`, Computed: true, PlanModifiers: tfsdk.AttributePlanModifiers{ - UseTierStateForUnknown(tierName), + UseTopologyStateForUnknown(topologyAttributeName), }, }, "size": { @@ -397,7 +397,7 @@ func ElasticsearchTierSchema(description string, required bool, tierName string) Computed: true, Optional: true, PlanModifiers: tfsdk.AttributePlanModifiers{ - 
UseTierStateForUnknown(tierName), + UseTopologyStateForUnknown(topologyAttributeName), }, }, "size_resource": { @@ -416,7 +416,7 @@ func ElasticsearchTierSchema(description string, required bool, tierName string) Optional: true, PlanModifiers: tfsdk.AttributePlanModifiers{ resource.UseStateForUnknown(), - UseTierStateForUnknown(tierName), + UseTopologyStateForUnknown(topologyAttributeName), }, }, "node_type_data": { @@ -465,7 +465,7 @@ func ElasticsearchTierSchema(description string, required bool, tierName string) UseNodeRolesDefault(), }, }, - "autoscaling": ElasticsearchTopologyAutoscalingSchema(tierName), + "autoscaling": ElasticsearchTopologyAutoscalingSchema(topologyAttributeName), }), } } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go index f8391a78c..1aaea41bd 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go @@ -26,15 +26,15 @@ import ( "github.com/hashicorp/terraform-plugin-framework/tfsdk" ) -func UseTierStateForUnknown(tier string) tfsdk.AttributePlanModifier { - return useTierState{tier: tier} +func UseTopologyStateForUnknown(topologyAttributeName string) tfsdk.AttributePlanModifier { + return useTopologyState{topologyAttributeName: topologyAttributeName} } -type useTierState struct { - tier string +type useTopologyState struct { + topologyAttributeName string } -func (m useTierState) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { +func (m useTopologyState) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { if req.AttributeState == nil || resp.AttributePlan == nil || req.AttributeConfig == nil { return } @@ -50,7 +50,7 @@ func (m useTierState) Modify(ctx context.Context, req 
tfsdk.ModifyAttributePlanR // we check tier's state instead of tier attribute's state because nil can be a valid state // e.g. `aws-io-optimized-v2` template doesn't specify `autoscaling_min` for `hot_content` so `min_size` state is nil - tierStateDefined, diags := attributeStateDefined(ctx, path.Root("elasticsearch").AtName(m.tier), req) + tierStateDefined, diags := attributeStateDefined(ctx, path.Root("elasticsearch").AtName(m.topologyAttributeName), req) resp.Diagnostics.Append(diags...) @@ -77,30 +77,14 @@ func (m useTierState) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanR resp.AttributePlan = req.AttributeState } -func (r useTierState) Description(ctx context.Context) string { +func (r useTopologyState) Description(ctx context.Context) string { return "Use tier's state if it's defined and template is the same." } -func (r useTierState) MarkdownDescription(ctx context.Context) string { +func (r useTopologyState) MarkdownDescription(ctx context.Context) string { return "Use tier's state if it's defined and template is the same." 
} -func attributeChanged(ctx context.Context, p path.Path, req tfsdk.ModifyAttributePlanRequest) (bool, diag.Diagnostics) { - var planValue attr.Value - - if diags := req.Plan.GetAttribute(ctx, p, &planValue); diags.HasError() { - return false, diags - } - - var stateValue attr.Value - - if diags := req.State.GetAttribute(ctx, p, &stateValue); diags.HasError() { - return false, diags - } - - return !planValue.Equal(stateValue), nil -} - func attributeStateDefined(ctx context.Context, p path.Path, req tfsdk.ModifyAttributePlanRequest) (bool, diag.Diagnostics) { var val attr.Value From 3b4e53cdc8e1d5df7426b5c7e508c663712365dc Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 5 Jan 2023 18:02:04 +0100 Subject: [PATCH 068/104] unit test for topology plan modifer + some refactoring --- .../v2/node_roles_plan_modifier_test.go | 41 +--- .../v2/node_types_plan_modifier_test.go | 52 ++--- .../v2/topology_plan_modifier.go | 9 +- .../v2/topology_plan_modifier_test.go | 201 ++++++++++++++++++ 4 files changed, 229 insertions(+), 74 deletions(-) create mode 100644 ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier_test.go diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier_test.go index fdff82c80..c727180ce 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier_test.go @@ -23,8 +23,6 @@ import ( deploymentv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" - "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" 
"github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" @@ -40,7 +38,6 @@ func Test_nodeRolesPlanModifier(t *testing.T) { tests := []struct { name string args args - expectedDiags diag.Diagnostics expected []string expectedUnknown bool }{ @@ -167,27 +164,13 @@ func Test_nodeRolesPlanModifier(t *testing.T) { // attributeConfig value is not used in the plan modifer // it just should be known - var attributeConfigValue attr.Value - diags := tfsdk.ValueFrom(context.Background(), []string{}, types.SetType{ElemType: types.StringType}, &attributeConfigValue) - assert.Nil(t, diags) - - var attributeStateValue attr.Value - diags = tfsdk.ValueFrom(context.Background(), tt.args.attributeState, types.SetType{ElemType: types.StringType}, &attributeStateValue) - assert.Nil(t, diags) - - var deploymentStateObject types.Object - diags = tfsdk.ValueFrom(context.Background(), tt.args.deploymentState, deploymentv2.DeploymentSchema().Type(), &deploymentStateObject) - assert.Nil(t, diags) + attributeConfigValue := attrValueFromGoTypeValue(t, []string{}, types.SetType{ElemType: types.StringType}) - deploymentStateValue, err := deploymentStateObject.ToTerraformValue(context.Background()) - assert.Nil(t, err) + attributeStateValue := attrValueFromGoTypeValue(t, tt.args.attributeState, types.SetType{ElemType: types.StringType}) - var deploymentPlanObject types.Object - diags = tfsdk.ValueFrom(context.Background(), tt.args.deploymentPlan, deploymentv2.DeploymentSchema().Type(), &deploymentPlanObject) - assert.Nil(t, diags) + deploymentStateValue := tftypesValueFromGoTypeValue(t, tt.args.deploymentState, deploymentv2.DeploymentSchema().Type()) - deploymentPlanValue, err := deploymentPlanObject.ToTerraformValue(context.Background()) - assert.Nil(t, err) + deploymentPlanValue := tftypesValueFromGoTypeValue(t, tt.args.deploymentPlan, deploymentv2.DeploymentSchema().Type()) req := tfsdk.ModifyAttributePlanRequest{ AttributeConfig: attributeConfigValue, @@ 
-205,23 +188,15 @@ func Test_nodeRolesPlanModifier(t *testing.T) { // the default plan value is `Unknown` ("known after apply") // the plan modifier either keeps this value or uses the current state // if test doesn't specify plan value, let's use the default (`Unknown`) value that is used by TF during plan modifier execution - var attributePlanValue attr.Value - if tt.args.attributePlan == nil { - diags = tfsdk.ValueFrom(context.Background(), types.Set{Unknown: true, ElemType: types.StringType}, types.SetType{ElemType: types.StringType}, &attributePlanValue) - } else { - diags = tfsdk.ValueFrom(context.Background(), tt.args.attributePlan, types.SetType{ElemType: types.StringType}, &attributePlanValue) + attributePlanValue := unknownValueFromAttrType(t, types.SetType{ElemType: types.StringType}) + if tt.args.attributePlan != nil { + attributePlanValue = attrValueFromGoTypeValue(t, tt.args.attributePlan, types.SetType{ElemType: types.StringType}) } - assert.Nil(t, diags) resp := tfsdk.ModifyAttributePlanResponse{AttributePlan: attributePlanValue} modifier.Modify(context.Background(), req, &resp) - if tt.expectedDiags != nil { - assert.Equal(t, tt.expectedDiags, resp.Diagnostics) - return - } - assert.Nil(t, resp.Diagnostics) if tt.expectedUnknown { @@ -231,7 +206,7 @@ func Test_nodeRolesPlanModifier(t *testing.T) { var attributePlan []string - diags = tfsdk.ValueAs(context.Background(), resp.AttributePlan, &attributePlan) + diags := tfsdk.ValueAs(context.Background(), resp.AttributePlan, &attributePlan) assert.Nil(t, diags) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier_test.go index d0c141ba8..dfd14863c 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier_test.go @@ -23,8 +23,6 @@ import ( deploymentv2 
"github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" - "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" @@ -38,10 +36,9 @@ func Test_nodeTypesPlanModifier(t *testing.T) { deploymentPlan deploymentv2.Deployment } tests := []struct { - name string - args args - expectedDiags diag.Diagnostics - expected types.String + name string + args args + expected types.String }{ { name: "it should keep current plan value if it's defined", @@ -140,27 +137,13 @@ func Test_nodeTypesPlanModifier(t *testing.T) { // attributeConfig value is not used in the plan modifer // it just should be known - var attributeConfigValue attr.Value - diags := tfsdk.ValueFrom(context.Background(), types.String{}, types.StringType, &attributeConfigValue) - assert.Nil(t, diags) - - var attributeStateValue attr.Value - diags = tfsdk.ValueFrom(context.Background(), tt.args.attributeState, types.StringType, &attributeStateValue) - assert.Nil(t, diags) + attributeConfigValue := attrValueFromGoTypeValue(t, types.String{}, types.StringType) - var deploymentStateObject types.Object - diags = tfsdk.ValueFrom(context.Background(), tt.args.deploymentState, deploymentv2.DeploymentSchema().Type(), &deploymentStateObject) - assert.Nil(t, diags) - - deploymentStateValue, err := deploymentStateObject.ToTerraformValue(context.Background()) - assert.Nil(t, err) + attributeStateValue := attrValueFromGoTypeValue(t, tt.args.attributeState, types.StringType) - var deploymentPlanObject types.Object - diags = tfsdk.ValueFrom(context.Background(), tt.args.deploymentPlan, deploymentv2.DeploymentSchema().Type(), &deploymentPlanObject) - assert.Nil(t, diags) + deploymentStateValue := 
tftypesValueFromGoTypeValue(t, tt.args.deploymentState, deploymentv2.DeploymentSchema().Type()) - deploymentPlanValue, err := deploymentPlanObject.ToTerraformValue(context.Background()) - assert.Nil(t, err) + deploymentPlanValue := tftypesValueFromGoTypeValue(t, tt.args.deploymentPlan, deploymentv2.DeploymentSchema().Type()) req := tfsdk.ModifyAttributePlanRequest{ AttributeConfig: attributeConfigValue, @@ -178,31 +161,26 @@ func Test_nodeTypesPlanModifier(t *testing.T) { // the default plan value is `Unknown` ("known after apply") // the plan modifier either keeps this value or uses the current state // if test doesn't specify plan value, let's use the default (`Unknown`) value that is used by TF during plan modifier execution - var attributePlanValue attr.Value + if tt.args.attributePlan == nil { tt.args.attributePlan = &types.String{Unknown: true} } - diags = tfsdk.ValueFrom(context.Background(), tt.args.attributePlan, types.StringType, &attributePlanValue) - assert.Nil(t, diags) + + attributePlanValue := attrValueFromGoTypeValue(t, tt.args.attributePlan, types.StringType) resp := tfsdk.ModifyAttributePlanResponse{AttributePlan: attributePlanValue} modifier.Modify(context.Background(), req, &resp) - if tt.expectedDiags != nil { - assert.Equal(t, tt.expectedDiags, resp.Diagnostics) - } else { - assert.Nil(t, resp.Diagnostics) + assert.Nil(t, resp.Diagnostics) - var attributePlan types.String + var attributePlan types.String - diags := tfsdk.ValueAs(context.Background(), resp.AttributePlan, &attributePlan) + diags := tfsdk.ValueAs(context.Background(), resp.AttributePlan, &attributePlan) - assert.Nil(t, diags) - - assert.Equal(t, tt.expected, attributePlan) - } + assert.Nil(t, diags) + assert.Equal(t, tt.expected, attributePlan) }) } } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go index 1aaea41bd..335853c98 100644 --- 
a/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go @@ -26,6 +26,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/tfsdk" ) +// Use current state for a topology's attribute if the topology's state is not nil and the template attribute has not changed func UseTopologyStateForUnknown(topologyAttributeName string) tfsdk.AttributePlanModifier { return useTopologyState{topologyAttributeName: topologyAttributeName} } @@ -48,9 +49,9 @@ func (m useTopologyState) Modify(ctx context.Context, req tfsdk.ModifyAttributeP return } - // we check tier's state instead of tier attribute's state because nil can be a valid state - // e.g. `aws-io-optimized-v2` template doesn't specify `autoscaling_min` for `hot_content` so `min_size` state is nil - tierStateDefined, diags := attributeStateDefined(ctx, path.Root("elasticsearch").AtName(m.topologyAttributeName), req) + // we check state of entire topology state instead of topology attributes states because nil can be a valid state for some topology attributes + // e.g. `aws-io-optimized-v2` template doesn't specify `autoscaling_min` for `hot_content` so `min_size`'s state is nil + topologyStateDefined, diags := attributeStateDefined(ctx, path.Root("elasticsearch").AtName(m.topologyAttributeName), req) resp.Diagnostics.Append(diags...) @@ -58,7 +59,7 @@ func (m useTopologyState) Modify(ctx context.Context, req tfsdk.ModifyAttributeP return } - if !tierStateDefined { + if !topologyStateDefined { return } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier_test.go new file mode 100644 index 000000000..b31d6db79 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier_test.go @@ -0,0 +1,201 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2_test + +import ( + "context" + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + deploymentv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "github.com/stretchr/testify/assert" +) + +func Test_topologyPlanModifier(t *testing.T) { + type args struct { + // the actual attribute type doesn't matter + attributeState types.String + attributePlan types.String + deploymentState deploymentv2.Deployment + deploymentPlan deploymentv2.Deployment + } + tests := []struct { + name string + args args + expected types.String + }{ + { + name: "it should keep the current plan value if the plan is known", + args: args{ + attributeState: types.String{Value: "state value"}, + attributePlan: types.String{Value: "plan value"}, + }, + expected: types.String{Value: "plan value"}, + }, + + { + name: "it should not use state if there is no such 
topology in the state", + args: args{ + attributeState: types.String{Null: true}, + attributePlan: types.String{Unknown: true}, + deploymentState: deploymentv2.Deployment{ + Elasticsearch: &v2.Elasticsearch{}, + }, + }, + expected: types.String{Unknown: true}, + }, + + { + name: "it should not use state if the plan changed the template attribute", + args: args{ + attributeState: types.String{Value: "1g"}, + attributePlan: types.String{Unknown: true}, + deploymentState: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Elasticsearch: &v2.Elasticsearch{ + HotTier: v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{ + MinSize: ec.String("1g"), + }, + }), + }, + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-storage-optimized-v3", + Elasticsearch: &v2.Elasticsearch{ + HotTier: v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + expected: types.String{Unknown: true}, + }, + + { + name: "it should use the current state if the state is null, the topology is defined in the state and the template has not changed", + args: args{ + attributeState: types.String{Null: true}, + attributePlan: types.String{Unknown: true}, + deploymentState: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Elasticsearch: &v2.Elasticsearch{ + HotTier: v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Elasticsearch: &v2.Elasticsearch{ + HotTier: v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + expected: types.String{Null: true}, + }, + + { + name: "it should use the current state if the topology is defined in the 
state and the template has not changed", + args: args{ + attributeState: types.String{Value: "1g"}, + attributePlan: types.String{Unknown: true}, + deploymentState: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Elasticsearch: &v2.Elasticsearch{ + HotTier: v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("1g"), + }, + }), + }, + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Elasticsearch: &v2.Elasticsearch{ + HotTier: v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + expected: types.String{Value: "1g"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + modifier := v2.UseTopologyStateForUnknown("hot") + + deploymentStateValue := tftypesValueFromGoTypeValue(t, tt.args.deploymentState, deploymentv2.DeploymentSchema().Type()) + + deploymentPlanValue := tftypesValueFromGoTypeValue(t, tt.args.deploymentPlan, deploymentv2.DeploymentSchema().Type()) + + req := tfsdk.ModifyAttributePlanRequest{ + // attributeConfig value is not used in the plan modifer + // it just should be known + AttributeConfig: types.String{}, + AttributeState: tt.args.attributeState, + State: tfsdk.State{ + Raw: deploymentStateValue, + Schema: deploymentv2.DeploymentSchema(), + }, + Plan: tfsdk.Plan{ + Raw: deploymentPlanValue, + Schema: deploymentv2.DeploymentSchema(), + }, + } + + resp := tfsdk.ModifyAttributePlanResponse{AttributePlan: tt.args.attributePlan} + + modifier.Modify(context.Background(), req, &resp) + + assert.Nil(t, resp.Diagnostics) + + assert.Equal(t, tt.expected, resp.AttributePlan) + }) + } +} + +func attrValueFromGoTypeValue(t *testing.T, goValue any, attributeType attr.Type) attr.Value { + var attrValue attr.Value + diags := tfsdk.ValueFrom(context.Background(), goValue, attributeType, &attrValue) + 
assert.Nil(t, diags) + return attrValue +} + +func tftypesValueFromGoTypeValue(t *testing.T, goValue any, attributeType attr.Type) tftypes.Value { + attrValue := attrValueFromGoTypeValue(t, goValue, attributeType) + tftypesValue, err := attrValue.ToTerraformValue(context.Background()) + assert.Nil(t, err) + return tftypesValue +} + +func unknownValueFromAttrType(t *testing.T, attributeType attr.Type) attr.Value { + tfVal := tftypes.NewValue(attributeType.TerraformType(context.Background()), tftypes.UnknownValue) + val, err := attributeType.ValueFromTerraform(context.Background(), tfVal) + assert.Nil(t, err) + return val +} From 8d905242f6b0ad7a56612ee86799ba7ddae51db9 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 11 Jan 2023 15:31:10 +0100 Subject: [PATCH 069/104] deploymentdatasource: change flatteners to return the result --- .../deploymentdatasource/datasource.go | 33 ++++++++++++++----- .../deploymentdatasource/flatteners_apm.go | 26 ++++++++------- .../flatteners_apm_test.go | 4 ++- .../flatteners_elasticsearch.go | 24 +++++++++----- .../flatteners_elasticsearch_test.go | 5 ++- .../flatteners_enterprise_search.go | 25 +++++++------- .../flatteners_enterprise_search_test.go | 4 ++- .../flatteners_integrations_server.go | 25 +++++++------- .../flatteners_integrations_server_test.go | 4 ++- .../deploymentdatasource/flatteners_kibana.go | 27 ++++++++------- .../flatteners_kibana_test.go | 4 ++- .../flatteners_observability.go | 15 +++++---- .../flatteners_observability_test.go | 4 ++- .../flatteners_traffic_filter.go | 12 ++++--- .../flatteners_traffic_filter_test.go | 4 ++- 15 files changed, 135 insertions(+), 81 deletions(-) diff --git a/ec/ecdatasource/deploymentdatasource/datasource.go b/ec/ecdatasource/deploymentdatasource/datasource.go index a38fa8840..afcee8670 100644 --- a/ec/ecdatasource/deploymentdatasource/datasource.go +++ b/ec/ecdatasource/deploymentdatasource/datasource.go @@ -97,7 +97,7 @@ func (d DataSource) Read(ctx context.Context, 
request datasource.ReadRequest, re } func modelToState(ctx context.Context, res *models.DeploymentGetResponse, state *modelV0) diag.Diagnostics { - var diags diag.Diagnostics + var diagsnostics diag.Diagnostics state.Name = types.String{Value: *res.Name} state.Healthy = types.Bool{Value: *res.Healthy} @@ -112,17 +112,32 @@ func modelToState(ctx context.Context, res *models.DeploymentGetResponse, state state.DeploymentTemplateID = types.String{Value: *es.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID} } - diags.Append(flattenTrafficFiltering(ctx, res.Settings, &state.TrafficFilter)...) - diags.Append(flattenObservability(ctx, res.Settings, &state.Observability)...) - diags.Append(flattenElasticsearchResources(ctx, res.Resources.Elasticsearch, &state.Elasticsearch)...) - diags.Append(flattenKibanaResources(ctx, res.Resources.Kibana, &state.Kibana)...) - diags.Append(flattenApmResources(ctx, res.Resources.Apm, &state.Apm)...) - diags.Append(flattenIntegrationsServerResources(ctx, res.Resources.IntegrationsServer, &state.IntegrationsServer)...) - diags.Append(flattenEnterpriseSearchResources(ctx, res.Resources.EnterpriseSearch, &state.EnterpriseSearch)...) + var diags diag.Diagnostics + + state.TrafficFilter, diags = flattenTrafficFiltering(ctx, res.Settings) + diagsnostics.Append(diags...) + + state.Observability, diags = flattenObservability(ctx, res.Settings) + diagsnostics.Append(diags...) + + state.Elasticsearch, diags = flattenElasticsearchResources(ctx, res.Resources.Elasticsearch) + diagsnostics.Append(diags...) + + state.Kibana, diags = flattenKibanaResources(ctx, res.Resources.Kibana) + diagsnostics.Append(diags...) + + state.Apm, diags = flattenApmResources(ctx, res.Resources.Apm) + diagsnostics.Append(diags...) + + state.IntegrationsServer, diags = flattenIntegrationsServerResources(ctx, res.Resources.IntegrationsServer) + diagsnostics.Append(diags...) 
+ + state.EnterpriseSearch, diags = flattenEnterpriseSearchResources(ctx, res.Resources.EnterpriseSearch) + diagsnostics.Append(diags...) if res.Metadata != nil { state.Tags = converters.ModelsTagsToTypesMap(res.Metadata.Tags) } - return diags + return diagsnostics } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_apm.go b/ec/ecdatasource/deploymentdatasource/flatteners_apm.go index c30160feb..6c6a6a70d 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_apm.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_apm.go @@ -32,8 +32,8 @@ import ( // flattenApmResources takes in Apm resource models and returns its // flattened form. -func flattenApmResources(ctx context.Context, in []*models.ApmResourceInfo, target interface{}) diag.Diagnostics { - var diags diag.Diagnostics +func flattenApmResources(ctx context.Context, in []*models.ApmResourceInfo) (types.List, diag.Diagnostics) { + var diagnostics diag.Diagnostics var result = make([]apmResourceInfoModelV0, 0, len(in)) for _, res := range in { @@ -69,7 +69,9 @@ func flattenApmResources(ctx context.Context, in []*models.ApmResourceInfo, targ model.Version = types.String{Value: plan.Apm.Version} } - diags.Append(flattenApmTopology(ctx, plan, &model.Topology)...) + var diags diag.Diagnostics + model.Topology, diags = flattenApmTopology(ctx, plan) + diagnostics.Append(diags...) } if res.Info.Metadata != nil { @@ -80,17 +82,17 @@ func flattenApmResources(ctx context.Context, in []*models.ApmResourceInfo, targ result = append(result, model) } - diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + var target types.List + diagnostics.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ ElemType: types.ObjectType{ AttrTypes: apmResourceInfoAttrTypes(), }, - }, target)...) + }, &target)...) 
- return diags + return target, diagnostics } -func flattenApmTopology(ctx context.Context, plan *models.ApmPlan, target interface{}) diag.Diagnostics { - var diags diag.Diagnostics +func flattenApmTopology(ctx context.Context, plan *models.ApmPlan) (types.List, diag.Diagnostics) { var result = make([]apmTopologyModelV0, 0, len(plan.ClusterTopology)) for _, topology := range plan.ClusterTopology { var model apmTopologyModelV0 @@ -111,13 +113,15 @@ func flattenApmTopology(ctx context.Context, plan *models.ApmPlan, target interf result = append(result, model) } - diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + var target types.List + + diags := tfsdk.ValueFrom(ctx, result, types.ListType{ ElemType: types.ObjectType{ AttrTypes: apmTopologyAttrTypes(), }, - }, target)...) + }, &target) - return diags + return target, diags } func isApmSizePopulated(topology *models.ApmTopologyElement) bool { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go index 8fa88c230..b927e0f63 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" @@ -115,7 +116,8 @@ func Test_flattenApmResource(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var newState modelV0 - diags := flattenApmResources(context.Background(), tt.args.in, &newState.Apm) + var diags diag.Diagnostics + newState.Apm, diags = flattenApmResources(context.Background(), tt.args.in) assert.Empty(t, diags) var got []apmResourceInfoModelV0 diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go index 
1e4fb5734..7849b0eb1 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go @@ -35,8 +35,8 @@ import ( // flattenElasticsearchResources takes in Elasticsearch resource models and returns its // flattened form. -func flattenElasticsearchResources(ctx context.Context, in []*models.ElasticsearchResourceInfo, target interface{}) diag.Diagnostics { - var diags diag.Diagnostics +func flattenElasticsearchResources(ctx context.Context, in []*models.ElasticsearchResourceInfo) (types.List, diag.Diagnostics) { + var diagnostics diag.Diagnostics var result = make([]elasticsearchResourceInfoModelV0, 0, len(in)) for _, res := range in { @@ -72,7 +72,9 @@ func flattenElasticsearchResources(ctx context.Context, in []*models.Elasticsear model.Autoscale = types.String{Value: strconv.FormatBool(*plan.AutoscalingEnabled)} } - diags.Append(flattenElasticsearchTopology(ctx, plan, &model.Topology)...) + var diags diag.Diagnostics + model.Topology, diags = flattenElasticsearchTopology(ctx, plan) + diagnostics.Append(diags...) } if res.Info.Metadata != nil { @@ -84,16 +86,18 @@ func flattenElasticsearchResources(ctx context.Context, in []*models.Elasticsear result = append(result, model) } - diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + var target types.List + + diagnostics.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ ElemType: types.ObjectType{ AttrTypes: elasticsearchResourceInfoAttrTypes(), }, - }, target)...) + }, &target)...) 
- return diags + return target, diagnostics } -func flattenElasticsearchTopology(ctx context.Context, plan *models.ElasticsearchClusterPlan, target interface{}) diag.Diagnostics { +func flattenElasticsearchTopology(ctx context.Context, plan *models.ElasticsearchClusterPlan) (types.List, diag.Diagnostics) { var diags diag.Diagnostics var result = make([]elasticsearchTopologyModelV0, 0, len(plan.ClusterTopology)) for _, topology := range plan.ClusterTopology { @@ -170,13 +174,15 @@ func flattenElasticsearchTopology(ctx context.Context, plan *models.Elasticsearc result = append(result, model) } + var target types.List + diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ ElemType: types.ObjectType{ AttrTypes: elasticsearchTopologyAttrTypes(), }, - }, target)...) + }, &target)...) - return diags + return target, diags } func isElasticsearchSizePopulated(topology *models.ElasticsearchClusterTopologyElement) bool { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go index a3ff87b83..43da2e9cf 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" @@ -165,7 +166,9 @@ func Test_flattenElasticsearchResources(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var model modelV0 - diags := flattenElasticsearchResources(context.Background(), tt.args.in, &model.Elasticsearch) + var diags diag.Diagnostics + + model.Elasticsearch, diags = flattenElasticsearchResources(context.Background(), tt.args.in) assert.Empty(t, diags) var got []elasticsearchResourceInfoModelV0 diff --git 
a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go index 29c102c6c..bd306fd43 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go @@ -32,8 +32,8 @@ import ( // flattenEnterpriseSearchResources takes in EnterpriseSearch resource models and returns its // flattened form. -func flattenEnterpriseSearchResources(ctx context.Context, in []*models.EnterpriseSearchResourceInfo, target interface{}) diag.Diagnostics { - var diags diag.Diagnostics +func flattenEnterpriseSearchResources(ctx context.Context, in []*models.EnterpriseSearchResourceInfo) (types.List, diag.Diagnostics) { + var diagnostics diag.Diagnostics var result = make([]enterpriseSearchResourceInfoModelV0, 0, len(in)) for _, res := range in { @@ -69,7 +69,9 @@ func flattenEnterpriseSearchResources(ctx context.Context, in []*models.Enterpri model.Version = types.String{Value: plan.EnterpriseSearch.Version} } - diags.Append(flattenEnterpriseSearchTopology(ctx, plan, &model.Topology)...) + var diags diag.Diagnostics + model.Topology, diags = flattenEnterpriseSearchTopology(ctx, plan) + diagnostics.Append(diags...) } if res.Info.Metadata != nil { @@ -80,17 +82,17 @@ func flattenEnterpriseSearchResources(ctx context.Context, in []*models.Enterpri result = append(result, model) } - diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + var target types.List + diagnostics.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ ElemType: types.ObjectType{ AttrTypes: enterpriseSearchResourceInfoAttrTypes(), }, - }, target)...) + }, &target)...) 
- return diags + return target, diagnostics } -func flattenEnterpriseSearchTopology(ctx context.Context, plan *models.EnterpriseSearchPlan, target interface{}) diag.Diagnostics { - var diags diag.Diagnostics +func flattenEnterpriseSearchTopology(ctx context.Context, plan *models.EnterpriseSearchPlan) (types.List, diag.Diagnostics) { var result = make([]enterpriseSearchTopologyModelV0, 0, len(plan.ClusterTopology)) for _, topology := range plan.ClusterTopology { var model enterpriseSearchTopologyModelV0 @@ -125,13 +127,14 @@ func flattenEnterpriseSearchTopology(ctx context.Context, plan *models.Enterpris result = append(result, model) } - diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + var target types.List + diags := tfsdk.ValueFrom(ctx, result, types.ListType{ ElemType: types.ObjectType{ AttrTypes: enterpriseSearchTopologyAttrTypes(), }, - }, target)...) + }, &target) - return diags + return target, diags } func isEsSizePopulated(topology *models.EnterpriseSearchTopologyElement) bool { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go index 928054ecb..aa8c6bf3b 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" @@ -130,7 +131,8 @@ func Test_flattenEnterpriseSearchResource(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var model modelV0 - diags := flattenEnterpriseSearchResources(context.Background(), tt.args.in, &model.EnterpriseSearch) + var diags diag.Diagnostics + model.EnterpriseSearch, diags = flattenEnterpriseSearchResources(context.Background(), 
tt.args.in) assert.Empty(t, diags) var got []enterpriseSearchResourceInfoModelV0 diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go index 1ba715d81..08000c06b 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go @@ -32,8 +32,8 @@ import ( // flattenIntegrationsServerResources takes in IntegrationsServer resource models and returns its // flattened form. -func flattenIntegrationsServerResources(ctx context.Context, in []*models.IntegrationsServerResourceInfo, target interface{}) diag.Diagnostics { - var diags diag.Diagnostics +func flattenIntegrationsServerResources(ctx context.Context, in []*models.IntegrationsServerResourceInfo) (types.List, diag.Diagnostics) { + var diagnostics diag.Diagnostics var result = make([]integrationsServerResourceInfoModelV0, 0, len(in)) for _, res := range in { @@ -69,7 +69,9 @@ func flattenIntegrationsServerResources(ctx context.Context, in []*models.Integr model.Version = types.String{Value: plan.IntegrationsServer.Version} } - diags.Append(flattenIntegrationsServerTopology(ctx, plan, &model.Topology)...) + var diags diag.Diagnostics + model.Topology, diags = flattenIntegrationsServerTopology(ctx, plan) + diagnostics.Append(diags...) } if res.Info.Metadata != nil { @@ -80,17 +82,17 @@ func flattenIntegrationsServerResources(ctx context.Context, in []*models.Integr result = append(result, model) } - diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + var target types.List + diagnostics.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ ElemType: types.ObjectType{ AttrTypes: integrationsServerResourceInfoAttrTypes(), }, - }, target)...) + }, &target)...) 
- return diags + return target, diagnostics } -func flattenIntegrationsServerTopology(ctx context.Context, plan *models.IntegrationsServerPlan, target interface{}) diag.Diagnostics { - var diags diag.Diagnostics +func flattenIntegrationsServerTopology(ctx context.Context, plan *models.IntegrationsServerPlan) (types.List, diag.Diagnostics) { var result = make([]integrationsServerTopologyModelV0, 0, len(plan.ClusterTopology)) for _, topology := range plan.ClusterTopology { var model integrationsServerTopologyModelV0 @@ -111,13 +113,14 @@ func flattenIntegrationsServerTopology(ctx context.Context, plan *models.Integra result = append(result, model) } - diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + var target types.List + diags := tfsdk.ValueFrom(ctx, result, types.ListType{ ElemType: types.ObjectType{ AttrTypes: apmTopologyAttrTypes(), }, - }, target)...) + }, &target) - return diags + return target, diags } func isIntegrationsServerSizePopulated(topology *models.IntegrationsServerTopologyElement) bool { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go index eee01a0e1..02799e39a 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" @@ -117,7 +118,8 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var newState modelV0 - diags := flattenIntegrationsServerResources(context.Background(), tt.args.in, &newState.IntegrationsServer) + var diags diag.Diagnostics + newState.IntegrationsServer, diags = 
flattenIntegrationsServerResources(context.Background(), tt.args.in) assert.Empty(t, diags) var got []integrationsServerResourceInfoModelV0 diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go b/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go index 585498e5f..f863d0c28 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go @@ -32,8 +32,8 @@ import ( // flattenKibanaResources takes in Kibana resource models and returns its // flattened form. -func flattenKibanaResources(ctx context.Context, in []*models.KibanaResourceInfo, target interface{}) diag.Diagnostics { - var diags diag.Diagnostics +func flattenKibanaResources(ctx context.Context, in []*models.KibanaResourceInfo) (types.List, diag.Diagnostics) { + var diagsnostics diag.Diagnostics var result = make([]kibanaResourceInfoModelV0, 0, len(in)) for _, res := range in { @@ -69,7 +69,9 @@ func flattenKibanaResources(ctx context.Context, in []*models.KibanaResourceInfo model.Version = types.String{Value: plan.Kibana.Version} } - diags.Append(flattenKibanaTopology(ctx, plan, &model.Topology)...) + var diags diag.Diagnostics + model.Topology, diags = flattenKibanaTopology(ctx, plan) + diagsnostics.Append(diags...) } if res.Info.Metadata != nil { @@ -80,17 +82,18 @@ func flattenKibanaResources(ctx context.Context, in []*models.KibanaResourceInfo result = append(result, model) } - diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + var target types.List + + diagsnostics.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ ElemType: types.ObjectType{ AttrTypes: kibanaResourceInfoAttrTypes(), }, - }, target)...) + }, &target)...) 
- return diags + return target, diagsnostics } -func flattenKibanaTopology(ctx context.Context, plan *models.KibanaClusterPlan, target interface{}) diag.Diagnostics { - var diags diag.Diagnostics +func flattenKibanaTopology(ctx context.Context, plan *models.KibanaClusterPlan) (types.List, diag.Diagnostics) { var result = make([]kibanaTopologyModelV0, 0, len(plan.ClusterTopology)) for _, topology := range plan.ClusterTopology { var model kibanaTopologyModelV0 @@ -111,13 +114,15 @@ func flattenKibanaTopology(ctx context.Context, plan *models.KibanaClusterPlan, result = append(result, model) } - diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + var target types.List + + diags := tfsdk.ValueFrom(ctx, result, types.ListType{ ElemType: types.ObjectType{ AttrTypes: kibanaTopologyAttrTypes(), }, - }, target)...) + }, &target) - return diags + return target, diags } func isKibanaSizePopulated(topology *models.KibanaClusterTopologyElement) bool { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go index 3602e9cb7..2ceb6366b 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" @@ -117,7 +118,8 @@ func Test_flattenKibanaResources(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var model modelV0 - diags := flattenKibanaResources(context.Background(), tt.args.in, &model.Kibana) + var diags diag.Diagnostics + model.Kibana, diags = flattenKibanaResources(context.Background(), tt.args.in) assert.Empty(t, diags) var got []kibanaResourceInfoModelV0 model.Kibana.ElementsAs(context.Background(), &got, false) diff --git 
a/ec/ecdatasource/deploymentdatasource/flatteners_observability.go b/ec/ecdatasource/deploymentdatasource/flatteners_observability.go index f00735f15..d6113e946 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_observability.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_observability.go @@ -28,8 +28,7 @@ import ( ) // flattenObservability parses a deployment's observability settings. -func flattenObservability(ctx context.Context, settings *models.DeploymentSettings, target interface{}) diag.Diagnostics { - var diags diag.Diagnostics +func flattenObservability(ctx context.Context, settings *models.DeploymentSettings) (types.List, diag.Diagnostics) { model := observabilitySettingsModel{ Metrics: types.Bool{Value: false}, Logs: types.Bool{Value: false}, @@ -37,7 +36,7 @@ func flattenObservability(ctx context.Context, settings *models.DeploymentSettin empty := true if settings == nil || settings.Observability == nil { - return diags + return types.List{}, nil } // We are only accepting a single deployment ID and refID for both logs and metrics. @@ -57,14 +56,16 @@ func flattenObservability(ctx context.Context, settings *models.DeploymentSettin } if empty { - return diags + return types.List{}, nil } - diags.Append(tfsdk.ValueFrom(ctx, []observabilitySettingsModel{model}, types.ListType{ + var target types.List + + diags := tfsdk.ValueFrom(ctx, []observabilitySettingsModel{model}, types.ListType{ ElemType: types.ObjectType{ AttrTypes: observabilitySettingsAttrTypes(), }, - }, target)...) 
+ }, &target) - return diags + return target, diags } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go index 2e7330655..cda3782c3 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go @@ -21,6 +21,7 @@ import ( "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" @@ -116,7 +117,8 @@ func TestFlattenObservability(t *testing.T) { for _, tt := range tests { var newState modelV0 t.Run(tt.name, func(t *testing.T) { - diags := flattenObservability(context.Background(), tt.args.settings, &newState.Observability) + var diags diag.Diagnostics + newState.Observability, diags = flattenObservability(context.Background(), tt.args.settings) assert.Empty(t, diags) var got []observabilitySettingsModel newState.Observability.ElementsAs(context.Background(), &got, false) diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go index eb16bb770..01aa82fc6 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go @@ -28,12 +28,14 @@ import ( ) // flattenTrafficFiltering parses a deployment's traffic filtering settings. 
-func flattenTrafficFiltering(ctx context.Context, settings *models.DeploymentSettings, target interface{}) diag.Diagnostics { - var diags diag.Diagnostics +func flattenTrafficFiltering(ctx context.Context, settings *models.DeploymentSettings) (types.List, diag.Diagnostics) { + var target types.List + if settings == nil || settings.TrafficFilterSettings == nil { - return diags + return target, nil } - diags.Append(tfsdk.ValueFrom(ctx, settings.TrafficFilterSettings.Rulesets, types.ListType{ElemType: types.StringType}, target)...) - return diags + diags := tfsdk.ValueFrom(ctx, settings.TrafficFilterSettings.Rulesets, types.ListType{ElemType: types.StringType}, &target) + + return target, diags } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go index 9cfc22c2f..7756d5707 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go @@ -21,6 +21,7 @@ import ( "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/stretchr/testify/assert" "github.com/elastic/cloud-sdk-go/pkg/models" @@ -77,7 +78,8 @@ func Test_flattenTrafficFiltering(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var newState modelV0 - diags := flattenTrafficFiltering(context.Background(), tt.args.settings, &newState.TrafficFilter) + var diags diag.Diagnostics + newState.TrafficFilter, diags = flattenTrafficFiltering(context.Background(), tt.args.settings) assert.Empty(t, diags) var got []string newState.TrafficFilter.ElementsAs(context.Background(), &got, false) From 7400f7c744be22755ac408d6a1f485778cddf607 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 11 Jan 2023 15:59:26 +0100 Subject: [PATCH 070/104] remove obsolete utility funcs --- .../flatteners_elasticsearch.go | 2 +- .../flatteners_elasticsearch_test.go | 13 
+++- ec/internal/util/set_util.go | 39 ---------- ec/internal/util/set_util_test.go | 76 ------------------- 4 files changed, 12 insertions(+), 118 deletions(-) delete mode 100644 ec/internal/util/set_util.go delete mode 100644 ec/internal/util/set_util_test.go diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go index 7849b0eb1..ce925cf90 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go @@ -137,7 +137,7 @@ func flattenElasticsearchTopology(ctx context.Context, plan *models.Elasticsearc } if len(topology.NodeRoles) > 0 { - diags.Append(tfsdk.ValueFrom(ctx, util.StringToItems(topology.NodeRoles...), types.SetType{ElemType: types.StringType}, &model.NodeRoles)...) + diags.Append(tfsdk.ValueFrom(ctx, topology.NodeRoles, types.SetType{ElemType: types.StringType}, &model.NodeRoles)...) } var autoscaling elasticsearchAutoscalingModel diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go index 43da2e9cf..3f4e0c79a 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go @@ -86,6 +86,9 @@ func Test_flattenElasticsearchResources(t *testing.T) { Master: ec.Bool(true), Ml: ec.Bool(false), }, + // NodeRoles cannot be used simultaneously with NodeType + // but let's have it here for testing purposes + NodeRoles: []string{"data_content", "data_hot"}, AutoscalingMax: &models.TopologySize{ Resource: ec.String("memory"), Value: ec.Int32(15360), @@ -143,8 +146,14 @@ func Test_flattenElasticsearchResources(t *testing.T) { "node_type_ingest": types.Bool{Value: true}, "node_type_master": types.Bool{Value: true}, "node_type_ml": types.Bool{Value: false}, - "node_roles": types.Set{ElemType: 
types.StringType, Elems: []attr.Value{}}, - "zone_count": types.Int64{Value: 1}, + "node_roles": types.Set{ElemType: types.StringType, Elems: func() []attr.Value { + result := make([]attr.Value, 0, 2) + for _, role := range []string{"data_content", "data_hot"} { + result = append(result, types.String{Value: role}) + } + return result + }()}, + "zone_count": types.Int64{Value: 1}, "autoscaling": types.List{ElemType: types.ObjectType{AttrTypes: elasticsearchAutoscalingAttrTypes()}, Elems: []attr.Value{types.Object{ AttrTypes: elasticsearchAutoscalingAttrTypes(), diff --git a/ec/internal/util/set_util.go b/ec/internal/util/set_util.go deleted file mode 100644 index 0c0caa9cf..000000000 --- a/ec/internal/util/set_util.go +++ /dev/null @@ -1,39 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package util - -import "sort" - -// StringToItems takes in a slice of strings and returns a []interface{}. -func StringToItems(elems ...string) (result []interface{}) { - for _, e := range elems { - result = append(result, e) - } - - return result -} - -// ItemsToString takes in an []interface{} and returns a slice of strings. 
-func ItemsToString(elems []interface{}) (result []string) { - for _, e := range elems { - result = append(result, e.(string)) - } - sort.Strings(result) - - return result -} diff --git a/ec/internal/util/set_util_test.go b/ec/internal/util/set_util_test.go deleted file mode 100644 index 3c85e3b9a..000000000 --- a/ec/internal/util/set_util_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package util - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestStringItems(t *testing.T) { - type args struct { - elems []string - } - tests := []struct { - name string - args args - wantResult []interface{} - }{ - { - name: "empty list returns nil", - }, - { - name: "populated list returns the results as []interface{}", - args: args{elems: []string{"some", "some-other", ""}}, - wantResult: []interface{}{"some", "some-other", ""}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotResult := StringToItems(tt.args.elems...) 
- assert.Equal(t, tt.wantResult, gotResult) - }) - } -} - -func TestItemsToString(t *testing.T) { - type args struct { - elems []interface{} - } - tests := []struct { - name string - args args - wantResult []string - }{ - { - name: "empty list returns nil", - }, - { - name: "populated list returns the results as []string{}", - args: args{elems: []interface{}{"some", "some-other", ""}}, - wantResult: []string{"", "some", "some-other"}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotResult := ItemsToString(tt.args.elems) - assert.Equal(t, tt.wantResult, gotResult) - }) - } -} From c0924dde6979bc9e620107653e5f444cbc01dcfc Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 11 Jan 2023 16:04:03 +0100 Subject: [PATCH 071/104] renaming local var --- .../deploymentdatasource/flatteners_elasticsearch.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go index ce925cf90..0e7d1be73 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go @@ -141,17 +141,17 @@ func flattenElasticsearchTopology(ctx context.Context, plan *models.Elasticsearc } var autoscaling elasticsearchAutoscalingModel - var empty = true + var hasAutoscalingModel = false if limit := topology.AutoscalingMax; limit != nil { autoscaling.MaxSizeResource = types.String{Value: *limit.Resource} autoscaling.MaxSize = types.String{Value: util.MemoryToState(*limit.Value)} - empty = false + hasAutoscalingModel = true } if limit := topology.AutoscalingMin; limit != nil { autoscaling.MinSizeResource = types.String{Value: *limit.Resource} autoscaling.MinSize = types.String{Value: util.MemoryToState(*limit.Value)} - empty = false + hasAutoscalingModel = true } if topology.AutoscalingPolicyOverrideJSON != nil { @@ -163,11 +163,11 @@ 
func flattenElasticsearchTopology(ctx context.Context, plan *models.Elasticsearc ) } else { autoscaling.PolicyOverrideJson = types.String{Value: string(b)} - empty = false + hasAutoscalingModel = true } } - if !empty { + if hasAutoscalingModel { diags.Append(tfsdk.ValueFrom(ctx, []elasticsearchAutoscalingModel{autoscaling}, elasticsearchAutoscalingListType(), &model.Autoscaling)...) } From 6c507cfd60b397cb32d8d3f00f390cd0fd062e59 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 12 Jan 2023 22:44:28 +0100 Subject: [PATCH 072/104] fix converion to attr.Value in deploymentdatasource --- .../flatteners_apm_test.go | 9 ++--- .../flatteners_elasticsearch_test.go | 10 ++---- .../flatteners_enterprise_search_test.go | 9 ++--- .../flatteners_integrations_server_test.go | 9 ++--- .../flatteners_kibana_test.go | 8 ++--- .../flatteners_observability.go | 20 ++++++----- .../flatteners_observability_test.go | 8 ++--- .../flatteners_traffic_filter.go | 5 +-- .../flatteners_traffic_filter_test.go | 35 ++++++++++++++----- 9 files changed, 58 insertions(+), 55 deletions(-) diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go index b927e0f63..674e3c22c 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go @@ -22,7 +22,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" @@ -115,14 +114,12 @@ func Test_flattenApmResource(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var newState modelV0 - var diags diag.Diagnostics - newState.Apm, diags = flattenApmResources(context.Background(), tt.args.in) + apm, diags := flattenApmResources(context.Background(), tt.args.in) assert.Empty(t, diags) - var got 
[]apmResourceInfoModelV0 - newState.Apm.ElementsAs(context.Background(), &got, false) + apm.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + checkConverionToAttrValue(t, "apm", apm) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go index 3f4e0c79a..a6489c16f 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go @@ -22,7 +22,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" @@ -174,15 +173,12 @@ func Test_flattenElasticsearchResources(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var model modelV0 - var diags diag.Diagnostics - - model.Elasticsearch, diags = flattenElasticsearchResources(context.Background(), tt.args.in) + elasticsearch, diags := flattenElasticsearchResources(context.Background(), tt.args.in) assert.Empty(t, diags) - var got []elasticsearchResourceInfoModelV0 - model.Elasticsearch.ElementsAs(context.Background(), &got, false) + elasticsearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + checkConverionToAttrValue(t, "elasticsearch", elasticsearch) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go index aa8c6bf3b..09fde1814 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go @@ -22,7 +22,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/diag" 
"github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" @@ -130,14 +129,12 @@ func Test_flattenEnterpriseSearchResource(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var model modelV0 - var diags diag.Diagnostics - model.EnterpriseSearch, diags = flattenEnterpriseSearchResources(context.Background(), tt.args.in) + enterpriseSearch, diags := flattenEnterpriseSearchResources(context.Background(), tt.args.in) assert.Empty(t, diags) - var got []enterpriseSearchResourceInfoModelV0 - model.EnterpriseSearch.ElementsAs(context.Background(), &got, false) + enterpriseSearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + checkConverionToAttrValue(t, "enterprise_search", enterpriseSearch) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go index 02799e39a..cc893d202 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go @@ -22,7 +22,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" @@ -117,14 +116,12 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var newState modelV0 - var diags diag.Diagnostics - newState.IntegrationsServer, diags = flattenIntegrationsServerResources(context.Background(), tt.args.in) + integrationsServer, diags := flattenIntegrationsServerResources(context.Background(), tt.args.in) assert.Empty(t, diags) - var got []integrationsServerResourceInfoModelV0 - newState.IntegrationsServer.ElementsAs(context.Background(), &got, false) + 
integrationsServer.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + checkConverionToAttrValue(t, "integrations_server", integrationsServer) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go index 2ceb6366b..3ee6c6bc5 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go @@ -22,7 +22,6 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" @@ -117,13 +116,12 @@ func Test_flattenKibanaResources(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var model modelV0 - var diags diag.Diagnostics - model.Kibana, diags = flattenKibanaResources(context.Background(), tt.args.in) + kibana, diags := flattenKibanaResources(context.Background(), tt.args.in) assert.Empty(t, diags) var got []kibanaResourceInfoModelV0 - model.Kibana.ElementsAs(context.Background(), &got, false) + kibana.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + checkConverionToAttrValue(t, "kibana", kibana) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_observability.go b/ec/ecdatasource/deploymentdatasource/flatteners_observability.go index d6113e946..9bf527c5b 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_observability.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_observability.go @@ -35,8 +35,15 @@ func flattenObservability(ctx context.Context, settings *models.DeploymentSettin } empty := true + target := types.List{ + ElemType: types.ObjectType{ + AttrTypes: observabilitySettingsAttrTypes(), + }, + } + if settings == nil || settings.Observability == nil { - return types.List{}, nil + target.Null = true + return 
target, nil } // We are only accepting a single deployment ID and refID for both logs and metrics. @@ -56,16 +63,11 @@ func flattenObservability(ctx context.Context, settings *models.DeploymentSettin } if empty { - return types.List{}, nil + target.Null = true + return target, nil } - var target types.List - - diags := tfsdk.ValueFrom(ctx, []observabilitySettingsModel{model}, types.ListType{ - ElemType: types.ObjectType{ - AttrTypes: observabilitySettingsAttrTypes(), - }, - }, &target) + diags := tfsdk.ValueFrom(ctx, []observabilitySettingsModel{model}, target.Type(ctx), &target) return target, diags } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go index cda3782c3..8e46535c3 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go @@ -21,7 +21,6 @@ import ( "context" "testing" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" @@ -115,14 +114,13 @@ func TestFlattenObservability(t *testing.T) { }, } for _, tt := range tests { - var newState modelV0 t.Run(tt.name, func(t *testing.T) { - var diags diag.Diagnostics - newState.Observability, diags = flattenObservability(context.Background(), tt.args.settings) + observability, diags := flattenObservability(context.Background(), tt.args.settings) assert.Empty(t, diags) var got []observabilitySettingsModel - newState.Observability.ElementsAs(context.Background(), &got, false) + observability.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + checkConverionToAttrValue(t, "observability", observability) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go index 01aa82fc6..b2c5d4e81 100644 --- 
a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go @@ -29,13 +29,14 @@ import ( // flattenTrafficFiltering parses a deployment's traffic filtering settings. func flattenTrafficFiltering(ctx context.Context, settings *models.DeploymentSettings) (types.List, diag.Diagnostics) { - var target types.List + target := types.List{ElemType: types.StringType} if settings == nil || settings.TrafficFilterSettings == nil { + target.Null = true return target, nil } - diags := tfsdk.ValueFrom(ctx, settings.TrafficFilterSettings.Rulesets, types.ListType{ElemType: types.StringType}, &target) + diags := tfsdk.ValueFrom(ctx, settings.TrafficFilterSettings.Rulesets, target.Type(ctx), &target) return target, diags } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go index 7756d5707..34c9353e0 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go @@ -21,7 +21,9 @@ import ( "context" "testing" - "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" "github.com/elastic/cloud-sdk-go/pkg/models" @@ -37,21 +39,21 @@ func Test_flattenTrafficFiltering(t *testing.T) { want []string }{ { - name: "parses no rules when they're empty", + name: "parses no rules when they're empty 1", args: args{}, }, { - name: "parses no rules when they're empty", + name: "parses no rules when they're empty 2", args: args{settings: &models.DeploymentSettings{}}, }, { - name: "parses no rules when they're empty", + name: "parses no rules when they're empty 3", args: args{settings: &models.DeploymentSettings{ 
TrafficFilterSettings: &models.TrafficFilterSettings{}, }}, }, { - name: "parses no rules when they're empty", + name: "parses no rules when they're empty 4", args: args{settings: &models.DeploymentSettings{ TrafficFilterSettings: &models.TrafficFilterSettings{ Rulesets: []string{}, @@ -77,13 +79,28 @@ func Test_flattenTrafficFiltering(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var newState modelV0 - var diags diag.Diagnostics - newState.TrafficFilter, diags = flattenTrafficFiltering(context.Background(), tt.args.settings) + trafficFilter, diags := flattenTrafficFiltering(context.Background(), tt.args.settings) assert.Empty(t, diags) var got []string - newState.TrafficFilter.ElementsAs(context.Background(), &got, false) + trafficFilter.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + + checkConverionToAttrValue(t, "traffic_filter", trafficFilter) }) } } + +// checking conversion to attr.Value +// it should catch cases when e.g. 
the func under test returns types.List{} +func checkConverionToAttrValue(t *testing.T, attributeName string, attributeValue types.List) { + var target types.List + diags := tfsdk.ValueFrom(context.Background(), attributeValue, attributeType(t, attributeName), &target) + assert.Nil(t, diags) +} + +func attributeType(t *testing.T, attributeName string) attr.Type { + var d DataSource + schema, diags := d.GetSchema(context.Background()) + assert.Nil(t, diags) + return schema.Attributes[attributeName].FrameworkType() +} From fc8a6cfada98572c3712ce03c604f5a73eaeddc9 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Fri, 13 Jan 2023 12:04:12 +0100 Subject: [PATCH 073/104] deploymentresource read - remove conversion to TF types --- ec/ecresource/deploymentresource/read.go | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/ec/ecresource/deploymentresource/read.go b/ec/ecresource/deploymentresource/read.go index 326286946..77cddf660 100644 --- a/ec/ecresource/deploymentresource/read.go +++ b/ec/ecresource/deploymentresource/read.go @@ -55,7 +55,7 @@ func (r *Resource) Read(ctx context.Context, request resource.ReadRequest, respo return } - var newState *deploymentv2.DeploymentTF + var newState *deploymentv2.Deployment // use state for the plan (there is no plan and config during Read) - otherwise we can get unempty plan output newState, diags = r.read(ctx, curState.Id.Value, &curState, curState, nil) @@ -73,7 +73,7 @@ func (r *Resource) Read(ctx context.Context, request resource.ReadRequest, respo response.Diagnostics.Append(diags...) 
} -func (r *Resource) read(ctx context.Context, id string, state *deploymentv2.DeploymentTF, plan deploymentv2.DeploymentTF, deploymentResources []*models.DeploymentResource) (*deploymentv2.DeploymentTF, diag.Diagnostics) { +func (r *Resource) read(ctx context.Context, id string, state *deploymentv2.DeploymentTF, plan deploymentv2.DeploymentTF, deploymentResources []*models.DeploymentResource) (*deploymentv2.Deployment, diag.Diagnostics) { var diags diag.Diagnostics response, err := deploymentapi.Get(deploymentapi.GetParams{ @@ -159,19 +159,7 @@ func (r *Resource) read(ctx context.Context, id string, state *deploymentv2.Depl deployment.Elasticsearch.Config = nil } - var deploymentTF deploymentv2.DeploymentTF - - schema, diags := r.GetSchema(ctx) - - if diags.HasError() { - return nil, diags - } - - if diags := tfsdk.ValueFrom(ctx, deployment, schema.Type(), &deploymentTF); diags.HasError() { - return nil, diags - } - - return &deploymentTF, diags + return deployment, diags } func deploymentNotFound(err error) bool { From 8e8a85cc411ea474d648c8ecc059255ff422183e Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Fri, 13 Jan 2023 12:55:44 +0100 Subject: [PATCH 074/104] Fix description an error messages. 
Remove obsolete TODO --- .../deploymentdatasource/schema_apm.go | 12 ++++++------ .../schema_elasticsearch.go | 18 +++++++++--------- .../schema_enterprise_search.go | 2 +- .../schema_integrations_server.go | 2 +- .../deploymentdatasource/schema_kibana.go | 10 +++++----- .../deploymentsdatasource/datasource.go | 6 ------ .../deploymentsdatasource/expanders.go | 2 +- .../elasticsearch/v2/schema.go | 6 +++--- 8 files changed, 26 insertions(+), 32 deletions(-) diff --git a/ec/ecdatasource/deploymentdatasource/schema_apm.go b/ec/ecdatasource/deploymentdatasource/schema_apm.go index 8dcb10ed1..33adf0e6f 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_apm.go +++ b/ec/ecdatasource/deploymentdatasource/schema_apm.go @@ -32,22 +32,22 @@ func apmResourceInfoSchema() tfsdk.Attribute { Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ "elasticsearch_cluster_ref_id": { Type: types.StringType, - Description: "The locally-unique user-specified id of an Elasticsearch Resource.", + Description: "The locally-unique user-specified id of an APM Resource.", Computed: true, }, "healthy": { Type: types.BoolType, - Description: "Resource kind health status.", + Description: "APM resource health status.", Computed: true, }, "http_endpoint": { Type: types.StringType, - Description: "HTTP endpoint for the resource kind.", + Description: "HTTP endpoint for the APM resource.", Computed: true, }, "https_endpoint": { Type: types.StringType, - Description: "HTTPS endpoint for the resource kind.", + Description: "HTTPS endpoint for the APM resource.", Computed: true, }, "ref_id": { @@ -62,7 +62,7 @@ func apmResourceInfoSchema() tfsdk.Attribute { }, "status": { Type: types.StringType, - Description: "Resource kind status (for example, \"started\", \"stopped\", etc).", + Description: "APM resource status (for example, \"started\", \"stopped\", etc).", Computed: true, }, "version": { @@ -92,7 +92,7 @@ func apmTopologySchema() tfsdk.Attribute { }, "size": { Type: 
types.StringType, - Description: "Amount of size_resource in Gigabytes. For example \"4g\".", + Description: `Amount of "size_resource" in Gigabytes. For example "4g".`, Computed: true, }, "size_resource": { diff --git a/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go b/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go index e79e0cb6c..47ddebe93 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go +++ b/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go @@ -26,7 +26,7 @@ import ( func elasticsearchResourceInfoSchema() tfsdk.Attribute { return tfsdk.Attribute{ - Description: "Instance configuration of the Elasticsearch resource kind.", + Description: "Instance configuration of the Elasticsearch Elasticsearch resource.", Computed: true, Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ @@ -37,7 +37,7 @@ func elasticsearchResourceInfoSchema() tfsdk.Attribute { }, "healthy": { Type: types.BoolType, - Description: "Resource kind health status.", + Description: "Elasticsearch resource health status.", Computed: true, }, "cloud_id": { @@ -48,12 +48,12 @@ func elasticsearchResourceInfoSchema() tfsdk.Attribute { }, "http_endpoint": { Type: types.StringType, - Description: "HTTP endpoint for the resource kind.", + Description: "HTTP endpoint for the Elasticsearch resource.", Computed: true, }, "https_endpoint": { Type: types.StringType, - Description: "HTTPS endpoint for the resource kind.", + Description: "HTTPS endpoint for the Elasticsearch resource.", Computed: true, }, "ref_id": { @@ -68,7 +68,7 @@ func elasticsearchResourceInfoSchema() tfsdk.Attribute { }, "status": { Type: types.StringType, - Description: "Resource kind status (for example, \"started\", \"stopped\", etc).", + Description: "Elasticsearch resource status (for example, \"started\", \"stopped\", etc).", Computed: true, }, "version": { @@ -98,7 +98,7 @@ func 
elasticsearchTopologySchema() tfsdk.Attribute { }, "size": { Type: types.StringType, - Description: "Amount of resource per topology element in the \"g\" notation.", + Description: `Amount of "size_resource" per topology element in Gigabytes. For example "4g".`, Computed: true, }, "size_resource": { @@ -118,17 +118,17 @@ func elasticsearchTopologySchema() tfsdk.Attribute { }, "node_type_master": { Type: types.BoolType, - Description: " Defines whether this node can be elected master (<7.10.0).", + Description: " Defines whether this node can be elected master (<8.0).", Computed: true, }, "node_type_ingest": { Type: types.BoolType, - Description: "Defines whether this node can run an ingest pipeline (<7.10.0).", + Description: "Defines whether this node can run an ingest pipeline (<8.0).", Computed: true, }, "node_type_ml": { Type: types.BoolType, - Description: "Defines whether this node can run ML jobs (<7.10.0).", + Description: "Defines whether this node can run ML jobs (<8.0).", Computed: true, }, "node_roles": { diff --git a/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go b/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go index 3dbb3e292..a59c5440f 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go +++ b/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go @@ -92,7 +92,7 @@ func enterpriseSearchTopologySchema() tfsdk.Attribute { }, "size": { Type: types.StringType, - Description: "Amount of resource per topology element in the \"g\" notation.", + Description: `Amount of "size_resource" in Gigabytes. 
For example "4g".`, Computed: true, }, "size_resource": { diff --git a/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go b/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go index 5314ba8ce..8701565f5 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go +++ b/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go @@ -92,7 +92,7 @@ func integrationsServerTopologySchema() tfsdk.Attribute { }, "size": { Type: types.StringType, - Description: "Amount of resource per topology element in the \"g\" notation.", + Description: `Amount of "size_resource" in Gigabytes. For example "4g".`, Computed: true, }, "size_resource": { diff --git a/ec/ecdatasource/deploymentdatasource/schema_kibana.go b/ec/ecdatasource/deploymentdatasource/schema_kibana.go index 63468f12b..35ebee7f2 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_kibana.go +++ b/ec/ecdatasource/deploymentdatasource/schema_kibana.go @@ -37,17 +37,17 @@ func kibanaResourceInfoSchema() tfsdk.Attribute { }, "healthy": { Type: types.BoolType, - Description: "Resource kind health status.", + Description: "Kibana resource health status.", Computed: true, }, "http_endpoint": { Type: types.StringType, - Description: "HTTP endpoint for the resource kind.", + Description: "HTTP endpoint for the Kibana resource.", Computed: true, }, "https_endpoint": { Type: types.StringType, - Description: "HTTPS endpoint for the resource kind.", + Description: "HTTPS endpoint for the Kibana resource.", Computed: true, }, "ref_id": { @@ -62,7 +62,7 @@ func kibanaResourceInfoSchema() tfsdk.Attribute { }, "status": { Type: types.StringType, - Description: "Resource kind status (for example, \"started\", \"stopped\", etc).", + Description: "Kibana resource status (for example, \"started\", \"stopped\", etc).", Computed: true, }, "version": { @@ -92,7 +92,7 @@ func kibanaTopologySchema() tfsdk.Attribute { }, "size": { Type: types.StringType, - Description: "Amount of 
resource per topology element in the \"g\" notation.", + Description: "Amount of size_resource in Gigabytes. For example \"4g\".", Computed: true, }, "size_resource": { diff --git a/ec/ecdatasource/deploymentsdatasource/datasource.go b/ec/ecdatasource/deploymentsdatasource/datasource.go index 18700210b..79f4df5b7 100644 --- a/ec/ecdatasource/deploymentsdatasource/datasource.go +++ b/ec/ecdatasource/deploymentsdatasource/datasource.go @@ -96,12 +96,6 @@ func (d DataSource) Read(ctx context.Context, request datasource.ReadRequest, re response.Diagnostics.Append(response.State.Set(ctx, newState)...) } -/* TODO - see https://github.com/multani/terraform-provider-camunda/pull/16/files -Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(5 * time.Minute), -}, -*/ - func modelToState(ctx context.Context, res *models.DeploymentsSearchResponse, state *modelV0) diag.Diagnostics { var diags diag.Diagnostics diff --git a/ec/ecdatasource/deploymentsdatasource/expanders.go b/ec/ecdatasource/deploymentsdatasource/expanders.go index 4973772e8..a1bfdeac4 100644 --- a/ec/ecdatasource/deploymentsdatasource/expanders.go +++ b/ec/ecdatasource/deploymentsdatasource/expanders.go @@ -165,7 +165,7 @@ func expandResourceFilters(ctx context.Context, resources *types.List, resourceK healthyTermPath := resourceKindPath + ".info.healthy" if filter.Healthy.Value != "true" && filter.Healthy.Value != "false" { - diags.AddError("invalid value for healthy", fmt.Sprintf("invalid value for healthy (true|false): '%s'", filter.Healthy.Value)) + diags.AddError("invalid value for healthy", fmt.Sprintf("expected either [true] or [false] but got [%s]", filter.Healthy.Value)) return nil, diags } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go index 55cf21bd1..4cdea11d2 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go 
@@ -393,7 +393,7 @@ func ElasticsearchTopologySchema(description string, required bool, topologyAttr }, "size": { Type: types.StringType, - Description: `Optional amount of memory per node in the "g" notation`, + Description: `Amount of "size_resource" per node in the "g" notation`, Computed: true, Optional: true, PlanModifiers: tfsdk.AttributePlanModifiers{ @@ -402,7 +402,7 @@ func ElasticsearchTopologySchema(description string, required bool, topologyAttr }, "size_resource": { Type: types.StringType, - Description: `Optional size type, defaults to "memory".`, + Description: `Size type, defaults to "memory".`, Optional: true, Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ @@ -411,7 +411,7 @@ func ElasticsearchTopologySchema(description string, required bool, topologyAttr }, "zone_count": { Type: types.Int64Type, - Description: `Optional number of zones that the Elasticsearch cluster will span. This is used to set HA`, + Description: `Number of zones that the Elasticsearch cluster will span. 
This is used to set HA`, Computed: true, Optional: true, PlanModifiers: tfsdk.AttributePlanModifiers{ From 07a5908bcd3493c32dc05573cc8e5c14513dce62 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Fri, 13 Jan 2023 13:33:05 +0100 Subject: [PATCH 075/104] minor improvements for deploymentresource tests --- ec/ecdatasource/stackdatasource/datasource.go | 6 ------ .../elasticsearch/v2/elasticsearch_read_test.go | 12 ++++-------- .../v2/enterprise_search_read_test.go | 7 +++++++ .../v2/integrations_server_read_test.go | 4 ++-- .../deploymentresource/kibana/v2/kibana_read_test.go | 4 ++-- .../observability/v2/observability_read_test.go | 4 ++-- 6 files changed, 17 insertions(+), 20 deletions(-) diff --git a/ec/ecdatasource/stackdatasource/datasource.go b/ec/ecdatasource/stackdatasource/datasource.go index b2a7f0982..e75e5e8e0 100644 --- a/ec/ecdatasource/stackdatasource/datasource.go +++ b/ec/ecdatasource/stackdatasource/datasource.go @@ -122,12 +122,6 @@ func modelToState(ctx context.Context, stack *models.StackVersionConfig, state * return diags } -/* TODO - see https://github.com/multani/terraform-provider-camunda/pull/16/files -Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(5 * time.Minute), -}, -*/ - func stackFromFilters(expr, version string, locked bool, stacks []*models.StackVersionConfig) (*models.StackVersionConfig, error) { if expr == "latest" && locked && version != "" { expr = version diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go index 5bc97e074..9900db03e 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/assert" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/tfsdk" 
"github.com/hashicorp/terraform-plugin-framework/types" @@ -38,10 +37,9 @@ func Test_readElasticsearch(t *testing.T) { remotes models.RemoteResources } tests := []struct { - name string - args args - want *Elasticsearch - diags diag.Diagnostics + name string + args args + want *Elasticsearch }{ { name: "empty resource list returns empty list", @@ -261,9 +259,7 @@ func Test_readElasticsearch(t *testing.T) { var esObj types.Object diags := tfsdk.ValueFrom(context.Background(), got, ElasticsearchSchema().FrameworkType(), &esObj) - if tt.diags.HasError() { - assert.Equal(t, tt.diags, diags) - } + assert.Nil(t, diags) }) } } diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read_test.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read_test.go index 564d0a93c..d67b70935 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read_test.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read_test.go @@ -18,8 +18,11 @@ package v2 import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" "github.com/elastic/cloud-sdk-go/pkg/api/mock" @@ -181,6 +184,10 @@ func Test_readEnterpriseSearch(t *testing.T) { got, err := ReadEnterpriseSearches(tt.args.in) assert.Nil(t, err) assert.Equal(t, tt.want, got) + + var obj types.Object + diags := tfsdk.ValueFrom(context.Background(), got, EnterpriseSearchSchema().FrameworkType(), &obj) + assert.Nil(t, diags) }) } } diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read_test.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read_test.go index df718d069..3c4711f05 100644 --- a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read_test.go +++ 
b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read_test.go @@ -337,8 +337,8 @@ func Test_readIntegrationsServer(t *testing.T) { assert.Nil(t, err) assert.Equal(t, tt.want, srv) - var srvTF types.Object - diags := tfsdk.ValueFrom(context.Background(), srv, IntegrationsServerSchema().FrameworkType(), &srvTF) + var obj types.Object + diags := tfsdk.ValueFrom(context.Background(), srv, IntegrationsServerSchema().FrameworkType(), &obj) assert.Nil(t, diags) }) } diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana_read_test.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_read_test.go index 3155dccf3..8b44a474b 100644 --- a/ec/ecresource/deploymentresource/kibana/v2/kibana_read_test.go +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_read_test.go @@ -167,8 +167,8 @@ func Test_ReadKibana(t *testing.T) { assert.Nil(t, err) assert.Equal(t, tt.want, kibana) - var kibanaTF types.Object - diags := tfsdk.ValueFrom(context.Background(), kibana, KibanaSchema().FrameworkType(), &kibanaTF) + var obj types.Object + diags := tfsdk.ValueFrom(context.Background(), kibana, KibanaSchema().FrameworkType(), &obj) assert.Nil(t, diags) }) } diff --git a/ec/ecresource/deploymentresource/observability/v2/observability_read_test.go b/ec/ecresource/deploymentresource/observability/v2/observability_read_test.go index 31c573713..e6f7aa4e0 100644 --- a/ec/ecresource/deploymentresource/observability/v2/observability_read_test.go +++ b/ec/ecresource/deploymentresource/observability/v2/observability_read_test.go @@ -119,8 +119,8 @@ func Test_readObservability(t *testing.T) { assert.Nil(t, err) assert.Equal(t, tt.want, observability) - var observabilityTF types.Object - diags := tfsdk.ValueFrom(context.Background(), observability, ObservabilitySchema().FrameworkType(), &observabilityTF) + var obj types.Object + diags := tfsdk.ValueFrom(context.Background(), observability, ObservabilitySchema().FrameworkType(), &obj) assert.Nil(t, diags) }) } From 
a36caba9a71331fb8de73dc7e22fbcc3a4d28f73 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Fri, 13 Jan 2023 16:57:25 +0100 Subject: [PATCH 076/104] Replace any interface arg with returned type in stackdatasource --- .../flatteners_apm_test.go | 3 +- .../flatteners_elasticsearch_test.go | 3 +- .../flatteners_enterprise_search_test.go | 3 +- .../flatteners_integrations_server_test.go | 3 +- .../flatteners_kibana_test.go | 3 +- .../flatteners_observability_test.go | 9 +++--- .../flatteners_traffic_filter_test.go | 30 ++++------------- ec/ecdatasource/stackdatasource/datasource.go | 22 +++++++++---- .../stackdatasource/flatteners_apm.go | 28 ++++++++-------- .../stackdatasource/flatteners_apm_test.go | 11 ++++--- .../flatteners_elasticsearch.go | 32 +++++++++---------- .../flatteners_elasticsearch_test.go | 11 ++++--- .../flatteners_enterprise_search.go | 28 ++++++++-------- .../flatteners_enterprise_search_test.go | 11 ++++--- .../stackdatasource/flatteners_kibana.go | 28 ++++++++-------- .../stackdatasource/flatteners_kibana_test.go | 11 ++++--- ec/internal/util/testutils.go | 17 ++++++++++ 17 files changed, 131 insertions(+), 122 deletions(-) diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go index 674e3c22c..2c9f43725 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenApmResource(t *testing.T) { @@ -119,7 +120,7 @@ func Test_flattenApmResource(t *testing.T) { var got []apmResourceInfoModelV0 apm.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) - checkConverionToAttrValue(t, "apm", apm) + util.CheckConverionToAttrValue(t, 
&DataSource{}, "apm", apm) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go index a6489c16f..3e623b99f 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenElasticsearchResources(t *testing.T) { @@ -178,7 +179,7 @@ func Test_flattenElasticsearchResources(t *testing.T) { var got []elasticsearchResourceInfoModelV0 elasticsearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) - checkConverionToAttrValue(t, "elasticsearch", elasticsearch) + util.CheckConverionToAttrValue(t, &DataSource{}, "elasticsearch", elasticsearch) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go index 09fde1814..528b310b6 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenEnterpriseSearchResource(t *testing.T) { @@ -134,7 +135,7 @@ func Test_flattenEnterpriseSearchResource(t *testing.T) { var got []enterpriseSearchResourceInfoModelV0 enterpriseSearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) - checkConverionToAttrValue(t, "enterprise_search", enterpriseSearch) + util.CheckConverionToAttrValue(t, 
&DataSource{}, "enterprise_search", enterpriseSearch) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go index cc893d202..8eb6d73df 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenIntegrationsServerResource(t *testing.T) { @@ -121,7 +122,7 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { var got []integrationsServerResourceInfoModelV0 integrationsServer.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) - checkConverionToAttrValue(t, "integrations_server", integrationsServer) + util.CheckConverionToAttrValue(t, &DataSource{}, "integrations_server", integrationsServer) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go index 3ee6c6bc5..8c023e164 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenKibanaResources(t *testing.T) { @@ -121,7 +122,7 @@ func Test_flattenKibanaResources(t *testing.T) { var got []kibanaResourceInfoModelV0 kibana.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) - checkConverionToAttrValue(t, "kibana", kibana) + util.CheckConverionToAttrValue(t, &DataSource{}, 
"kibana", kibana) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go index 8e46535c3..3396f898f 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func TestFlattenObservability(t *testing.T) { @@ -38,15 +39,15 @@ func TestFlattenObservability(t *testing.T) { want []observabilitySettingsModel }{ { - name: "flattens no observability settings when empty", + name: "flattens no observability settings when empty #1", args: args{}, }, { - name: "flattens no observability settings when empty", + name: "flattens no observability settings when empty #2", args: args{settings: &models.DeploymentSettings{}}, }, { - name: "flattens no observability settings when empty", + name: "flattens no observability settings when empty #3", args: args{settings: &models.DeploymentSettings{Observability: &models.DeploymentObservabilitySettings{}}}, }, { @@ -120,7 +121,7 @@ func TestFlattenObservability(t *testing.T) { var got []observabilitySettingsModel observability.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) - checkConverionToAttrValue(t, "observability", observability) + util.CheckConverionToAttrValue(t, &DataSource{}, "observability", observability) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go index 34c9353e0..87c18a07e 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go @@ -21,12 +21,10 @@ import ( "context" "testing" - 
"github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/tfsdk" - "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenTrafficFiltering(t *testing.T) { @@ -39,21 +37,21 @@ func Test_flattenTrafficFiltering(t *testing.T) { want []string }{ { - name: "parses no rules when they're empty 1", + name: "parses no rules when they're empty #1", args: args{}, }, { - name: "parses no rules when they're empty 2", + name: "parses no rules when they're empty #2", args: args{settings: &models.DeploymentSettings{}}, }, { - name: "parses no rules when they're empty 3", + name: "parses no rules when they're empty #3", args: args{settings: &models.DeploymentSettings{ TrafficFilterSettings: &models.TrafficFilterSettings{}, }}, }, { - name: "parses no rules when they're empty 4", + name: "parses no rules when they're empty #4", args: args{settings: &models.DeploymentSettings{ TrafficFilterSettings: &models.TrafficFilterSettings{ Rulesets: []string{}, @@ -84,23 +82,7 @@ func Test_flattenTrafficFiltering(t *testing.T) { var got []string trafficFilter.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) - - checkConverionToAttrValue(t, "traffic_filter", trafficFilter) + util.CheckConverionToAttrValue(t, &DataSource{}, "traffic_filter", trafficFilter) }) } } - -// checking conversion to attr.Value -// it should catch cases when e.g. 
the func under test returns types.List{} -func checkConverionToAttrValue(t *testing.T, attributeName string, attributeValue types.List) { - var target types.List - diags := tfsdk.ValueFrom(context.Background(), attributeValue, attributeType(t, attributeName), &target) - assert.Nil(t, diags) -} - -func attributeType(t *testing.T, attributeName string) attr.Type { - var d DataSource - schema, diags := d.GetSchema(context.Background()) - assert.Nil(t, diags) - return schema.Attributes[attributeName].FrameworkType() -} diff --git a/ec/ecdatasource/stackdatasource/datasource.go b/ec/ecdatasource/stackdatasource/datasource.go index e75e5e8e0..c5559c687 100644 --- a/ec/ecdatasource/stackdatasource/datasource.go +++ b/ec/ecdatasource/stackdatasource/datasource.go @@ -96,7 +96,7 @@ func (d DataSource) Read(ctx context.Context, request datasource.ReadRequest, re } func modelToState(ctx context.Context, stack *models.StackVersionConfig, state *modelV0) diag.Diagnostics { - var diags diag.Diagnostics + var diagnostics diag.Diagnostics state.ID = types.String{Value: stack.Version} state.Version = types.String{Value: stack.Version} @@ -107,19 +107,27 @@ func modelToState(ctx context.Context, stack *models.StackVersionConfig, state * state.MinUpgradableFrom = types.String{Value: stack.MinUpgradableFrom} if len(stack.UpgradableTo) > 0 { - diags.Append(tfsdk.ValueFrom(ctx, stack.UpgradableTo, types.ListType{ElemType: types.StringType}, &state.UpgradableTo)...) + diagnostics.Append(tfsdk.ValueFrom(ctx, stack.UpgradableTo, types.ListType{ElemType: types.StringType}, &state.UpgradableTo)...) } if stack.Whitelisted != nil { state.AllowListed = types.Bool{Value: *stack.Whitelisted} } - diags.Append(flattenStackVersionApmConfig(ctx, stack.Apm, &state.Apm)...) - diags.Append(flattenStackVersionElasticsearchConfig(ctx, stack.Elasticsearch, &state.Elasticsearch)...) - diags.Append(flattenStackVersionEnterpriseSearchConfig(ctx, stack.EnterpriseSearch, &state.EnterpriseSearch)...) 
- diags.Append(flattenStackVersionKibanaConfig(ctx, stack.Kibana, &state.Kibana)...) + var diags diag.Diagnostics + state.Apm, diags = flattenStackVersionApmConfig(ctx, stack.Apm) + diagnostics.Append(diags...) + + state.Elasticsearch, diags = flattenStackVersionElasticsearchConfig(ctx, stack.Elasticsearch) + diagnostics.Append(diags...) + + state.EnterpriseSearch, diags = flattenStackVersionEnterpriseSearchConfig(ctx, stack.EnterpriseSearch) + diagnostics.Append(diags...) + + state.Kibana, diags = flattenStackVersionKibanaConfig(ctx, stack.Kibana) + diagnostics.Append(diags...) - return diags + return diagnostics } func stackFromFilters(expr, version string, locked bool, stacks []*models.StackVersionConfig) (*models.StackVersionConfig, error) { diff --git a/ec/ecdatasource/stackdatasource/flatteners_apm.go b/ec/ecdatasource/stackdatasource/flatteners_apm.go index ce03799d9..c4c14214d 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_apm.go +++ b/ec/ecdatasource/stackdatasource/flatteners_apm.go @@ -28,45 +28,43 @@ import ( ) // flattenStackVersionApmConfig takes a StackVersionApmConfigs and flattens it. -func flattenStackVersionApmConfig(ctx context.Context, res *models.StackVersionApmConfig, target interface{}) diag.Diagnostics { +func flattenStackVersionApmConfig(ctx context.Context, res *models.StackVersionApmConfig) (types.List, diag.Diagnostics) { var diags diag.Diagnostics model := newResourceKindConfigModelV0() - empty := true + + target := types.List{ElemType: resourceKindConfigSchema(Apm).FrameworkType().(types.ListType).ElemType} + target.Null = true if res == nil { - return diags + return target, nil } if len(res.Blacklist) > 0 { diags.Append(tfsdk.ValueFrom(ctx, res.Blacklist, types.ListType{ElemType: types.StringType}, &model.DenyList)...) 
- empty = false + target.Null = false } if res.CapacityConstraints != nil { model.CapacityConstraintsMax = types.Int64{Value: int64(*res.CapacityConstraints.Max)} model.CapacityConstraintsMin = types.Int64{Value: int64(*res.CapacityConstraints.Min)} - empty = false + target.Null = false } if len(res.CompatibleNodeTypes) > 0 { diags.Append(tfsdk.ValueFrom(ctx, res.CompatibleNodeTypes, types.ListType{ElemType: types.StringType}, &model.CompatibleNodeTypes)...) - empty = false + target.Null = false } if res.DockerImage != nil && *res.DockerImage != "" { model.DockerImage = types.String{Value: *res.DockerImage} - empty = false + target.Null = false } - if empty { - return diags + if target.Null { + return target, diags } - diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, types.ListType{ - ElemType: types.ObjectType{ - AttrTypes: resourceKindConfigAttrTypes(Apm), - }, - }, target)...) + diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, resourceKindConfigSchema(Apm).FrameworkType(), &target)...) 
- return diags + return target, diags } diff --git a/ec/ecdatasource/stackdatasource/flatteners_apm_test.go b/ec/ecdatasource/stackdatasource/flatteners_apm_test.go index 627f90fc8..4f5c8a77f 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_apm_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_apm_test.go @@ -40,12 +40,12 @@ func Test_flattenApmResource(t *testing.T) { want []resourceKindConfigModelV0 }{ { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #1", args: args{}, want: nil, }, { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #2", args: args{res: &models.StackVersionApmConfig{}}, want: nil, }, @@ -70,13 +70,14 @@ func Test_flattenApmResource(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var newState modelV0 - diags := flattenStackVersionApmConfig(context.Background(), tt.args.res, &newState.Apm) + apm, diags := flattenStackVersionApmConfig(context.Background(), tt.args.res) assert.Empty(t, diags) var got []resourceKindConfigModelV0 - newState.Apm.ElementsAs(context.Background(), &got, false) + apm.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + + util.CheckConverionToAttrValue(t, &DataSource{}, "apm", apm) }) } } diff --git a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go index e003a4e40..94bae2e54 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go +++ b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go @@ -28,55 +28,53 @@ import ( ) // flattenStackVersionElasticsearchConfig takes a StackVersionElasticsearchConfig and flattens it. 
-func flattenStackVersionElasticsearchConfig(ctx context.Context, res *models.StackVersionElasticsearchConfig, target interface{}) diag.Diagnostics { +func flattenStackVersionElasticsearchConfig(ctx context.Context, res *models.StackVersionElasticsearchConfig) (types.List, diag.Diagnostics) { var diags diag.Diagnostics model := newElasticsearchConfigModelV0() - empty := true + + target := types.List{ElemType: elasticsearchConfigSchema().FrameworkType().(types.ListType).ElemType} + target.Null = true if res == nil { - return diags + return target, diags } if len(res.Blacklist) > 0 { diags.Append(tfsdk.ValueFrom(ctx, res.Blacklist, types.ListType{ElemType: types.StringType}, &model.DenyList)...) - empty = false + target.Null = false } if res.CapacityConstraints != nil { model.CapacityConstraintsMax = types.Int64{Value: int64(*res.CapacityConstraints.Max)} model.CapacityConstraintsMin = types.Int64{Value: int64(*res.CapacityConstraints.Min)} - empty = false + target.Null = false } if len(res.CompatibleNodeTypes) > 0 { diags.Append(tfsdk.ValueFrom(ctx, res.CompatibleNodeTypes, types.ListType{ElemType: types.StringType}, &model.CompatibleNodeTypes)...) - empty = false + target.Null = false } if res.DockerImage != nil && *res.DockerImage != "" { model.DockerImage = types.String{Value: *res.DockerImage} - empty = false + target.Null = false } if len(res.Plugins) > 0 { diags.Append(tfsdk.ValueFrom(ctx, res.Plugins, types.ListType{ElemType: types.StringType}, &model.Plugins)...) - empty = false + target.Null = false } if len(res.DefaultPlugins) > 0 { diags.Append(tfsdk.ValueFrom(ctx, res.DefaultPlugins, types.ListType{ElemType: types.StringType}, &model.DefaultPlugins)...) - empty = false + target.Null = false } - if empty { - return diags + if target.Null { + return target, diags } - diags.Append(tfsdk.ValueFrom(ctx, []elasticsearchConfigModelV0{model}, types.ListType{ - ElemType: types.ObjectType{ - AttrTypes: elasticsearchConfigAttrTypes(), - }, - }, target)...) 
+ diags.Append(tfsdk.ValueFrom(ctx, []elasticsearchConfigModelV0{model}, elasticsearchConfigSchema().FrameworkType(), &target)...) - return diags + return target, diags } diff --git a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go index 80de85305..590d94e92 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go @@ -40,12 +40,12 @@ func Test_flattenElasticsearchResource(t *testing.T) { want []elasticsearchConfigModelV0 }{ { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #1", args: args{}, want: nil, }, { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #2", args: args{res: &models.StackVersionElasticsearchConfig{}}, want: nil, }, @@ -102,13 +102,14 @@ func Test_flattenElasticsearchResource(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var newState modelV0 - diags := flattenStackVersionElasticsearchConfig(context.Background(), tt.args.res, &newState.Elasticsearch) + elasticsearch, diags := flattenStackVersionElasticsearchConfig(context.Background(), tt.args.res) assert.Empty(t, diags) var got []elasticsearchConfigModelV0 - newState.Elasticsearch.ElementsAs(context.Background(), &got, false) + elasticsearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + + util.CheckConverionToAttrValue(t, &DataSource{}, "elasticsearch", elasticsearch) }) } } diff --git a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go index 0db37b9f1..f6e5a4ee5 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go +++ b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go @@ -28,45 +28,43 @@ import ( ) // flattenStackVersionEnterpriseSearchConfig 
takes a StackVersionEnterpriseSearchConfig and flattens it. -func flattenStackVersionEnterpriseSearchConfig(ctx context.Context, res *models.StackVersionEnterpriseSearchConfig, target interface{}) diag.Diagnostics { +func flattenStackVersionEnterpriseSearchConfig(ctx context.Context, res *models.StackVersionEnterpriseSearchConfig) (types.List, diag.Diagnostics) { var diags diag.Diagnostics model := newResourceKindConfigModelV0() - empty := true + + target := types.List{ElemType: resourceKindConfigSchema(EnterpriseSearch).FrameworkType().(types.ListType).ElemType} + target.Null = true if res == nil { - return diags + return target, diags } if len(res.Blacklist) > 0 { diags.Append(tfsdk.ValueFrom(ctx, res.Blacklist, types.ListType{ElemType: types.StringType}, &model.DenyList)...) - empty = false + target.Null = false } if res.CapacityConstraints != nil { model.CapacityConstraintsMax = types.Int64{Value: int64(*res.CapacityConstraints.Max)} model.CapacityConstraintsMin = types.Int64{Value: int64(*res.CapacityConstraints.Min)} - empty = false + target.Null = false } if len(res.CompatibleNodeTypes) > 0 { diags.Append(tfsdk.ValueFrom(ctx, res.CompatibleNodeTypes, types.ListType{ElemType: types.StringType}, &model.CompatibleNodeTypes)...) - empty = false + target.Null = false } if res.DockerImage != nil && *res.DockerImage != "" { model.DockerImage = types.String{Value: *res.DockerImage} - empty = false + target.Null = false } - if empty { - return diags + if target.Null { + return target, diags } - diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, types.ListType{ - ElemType: types.ObjectType{ - AttrTypes: resourceKindConfigAttrTypes(EnterpriseSearch), - }, - }, target)...) + diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, resourceKindConfigSchema(EnterpriseSearch).FrameworkType(), &target)...) 
- return diags + return target, diags } diff --git a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go index dd39145cb..d55a7a146 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go @@ -40,12 +40,12 @@ func Test_flattenEnterpriseSearchResources(t *testing.T) { want []resourceKindConfigModelV0 }{ { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #1", args: args{}, want: nil, }, { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #2", args: args{res: &models.StackVersionEnterpriseSearchConfig{}}, want: nil, }, @@ -70,13 +70,14 @@ func Test_flattenEnterpriseSearchResources(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var newState modelV0 - diags := flattenStackVersionEnterpriseSearchConfig(context.Background(), tt.args.res, &newState.EnterpriseSearch) + enterpriseSearch, diags := flattenStackVersionEnterpriseSearchConfig(context.Background(), tt.args.res) assert.Empty(t, diags) var got []resourceKindConfigModelV0 - newState.EnterpriseSearch.ElementsAs(context.Background(), &got, false) + enterpriseSearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + + util.CheckConverionToAttrValue(t, &DataSource{}, "enterprise_search", enterpriseSearch) }) } } diff --git a/ec/ecdatasource/stackdatasource/flatteners_kibana.go b/ec/ecdatasource/stackdatasource/flatteners_kibana.go index d7eca89db..bdfd6721b 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_kibana.go +++ b/ec/ecdatasource/stackdatasource/flatteners_kibana.go @@ -28,45 +28,43 @@ import ( ) // flattenStackVersionKibanaConfig takes a StackVersionKibanaConfig and flattens it. 
-func flattenStackVersionKibanaConfig(ctx context.Context, res *models.StackVersionKibanaConfig, target interface{}) diag.Diagnostics { +func flattenStackVersionKibanaConfig(ctx context.Context, res *models.StackVersionKibanaConfig) (types.List, diag.Diagnostics) { var diags diag.Diagnostics model := newResourceKindConfigModelV0() - empty := true + + target := types.List{ElemType: resourceKindConfigSchema(Kibana).FrameworkType().(types.ListType).ElemType} + target.Null = true if res == nil { - return diags + return target, diags } if len(res.Blacklist) > 0 { diags.Append(tfsdk.ValueFrom(ctx, res.Blacklist, types.ListType{ElemType: types.StringType}, &model.DenyList)...) - empty = false + target.Null = false } if res.CapacityConstraints != nil { model.CapacityConstraintsMax = types.Int64{Value: int64(*res.CapacityConstraints.Max)} model.CapacityConstraintsMin = types.Int64{Value: int64(*res.CapacityConstraints.Min)} - empty = false + target.Null = false } if len(res.CompatibleNodeTypes) > 0 { diags.Append(tfsdk.ValueFrom(ctx, res.CompatibleNodeTypes, types.ListType{ElemType: types.StringType}, &model.CompatibleNodeTypes)...) - empty = false + target.Null = false } if res.DockerImage != nil && *res.DockerImage != "" { model.DockerImage = types.String{Value: *res.DockerImage} - empty = false + target.Null = false } - if empty { - return diags + if target.Null { + return target, diags } - diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, types.ListType{ - ElemType: types.ObjectType{ - AttrTypes: resourceKindConfigAttrTypes(Kibana), - }, - }, target)...) + diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, resourceKindConfigSchema(Kibana).FrameworkType(), &target)...) 
- return diags + return target, diags } diff --git a/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go b/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go index 513199bc9..8ab97d2a2 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go @@ -40,12 +40,12 @@ func Test_flattenKibanaResources(t *testing.T) { want []resourceKindConfigModelV0 }{ { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #1", args: args{}, want: nil, }, { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #2", args: args{res: &models.StackVersionKibanaConfig{}}, want: nil, }, @@ -70,13 +70,14 @@ func Test_flattenKibanaResources(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var newState modelV0 - diags := flattenStackVersionKibanaConfig(context.Background(), tt.args.res, &newState.Kibana) + kibana, diags := flattenStackVersionKibanaConfig(context.Background(), tt.args.res) assert.Empty(t, diags) var got []resourceKindConfigModelV0 - newState.Kibana.ElementsAs(context.Background(), &got, false) + kibana.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + + util.CheckConverionToAttrValue(t, &DataSource{}, "kibana", kibana) }) } } diff --git a/ec/internal/util/testutils.go b/ec/internal/util/testutils.go index ee5e5491f..287104f6f 100644 --- a/ec/internal/util/testutils.go +++ b/ec/internal/util/testutils.go @@ -20,10 +20,15 @@ package util import ( "context" "errors" + "fmt" "testing" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/stretchr/testify/assert" 
"github.com/elastic/cloud-sdk-go/pkg/multierror" ) @@ -107,3 +112,15 @@ func generateRD(t *testing.T, schemaMap map[string]*schema.Schema, rawAttr map[s return result } + +// Check conversion to attr.Value +// it should catch cases when e.g. the func under test returns types.List{} +func CheckConverionToAttrValue(t *testing.T, dt datasource.DataSource, attributeName string, attributeValue types.List) { + schema, diags := dt.GetSchema(context.Background()) + assert.Nil(t, diags) + attrType := schema.Attributes[attributeName].FrameworkType() + assert.NotNil(t, attrType, fmt.Sprintf("Type of attribute '%s' cannot be nil", attributeName)) + var target types.List + diags = tfsdk.ValueFrom(context.Background(), attributeValue, attrType, &target) + assert.Nil(t, diags) +} From d1208af5dac10228dcbf9bc680adb751af3f289f Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Fri, 13 Jan 2023 17:05:48 +0100 Subject: [PATCH 077/104] Renaming --- ec/ecdatasource/stackdatasource/datasource.go | 8 ++++---- ec/ecdatasource/stackdatasource/flatteners_apm.go | 4 ++-- ec/ecdatasource/stackdatasource/flatteners_apm_test.go | 2 +- .../stackdatasource/flatteners_elasticsearch.go | 4 ++-- .../stackdatasource/flatteners_elasticsearch_test.go | 2 +- .../stackdatasource/flatteners_enterprise_search.go | 4 ++-- .../stackdatasource/flatteners_enterprise_search_test.go | 2 +- ec/ecdatasource/stackdatasource/flatteners_kibana.go | 4 ++-- ec/ecdatasource/stackdatasource/flatteners_kibana_test.go | 2 +- 9 files changed, 16 insertions(+), 16 deletions(-) diff --git a/ec/ecdatasource/stackdatasource/datasource.go b/ec/ecdatasource/stackdatasource/datasource.go index c5559c687..b4e9aefd4 100644 --- a/ec/ecdatasource/stackdatasource/datasource.go +++ b/ec/ecdatasource/stackdatasource/datasource.go @@ -115,16 +115,16 @@ func modelToState(ctx context.Context, stack *models.StackVersionConfig, state * } var diags diag.Diagnostics - state.Apm, diags = flattenStackVersionApmConfig(ctx, stack.Apm) + 
state.Apm, diags = flattenApmConfig(ctx, stack.Apm) diagnostics.Append(diags...) - state.Elasticsearch, diags = flattenStackVersionElasticsearchConfig(ctx, stack.Elasticsearch) + state.Elasticsearch, diags = flattenElasticsearchConfig(ctx, stack.Elasticsearch) diagnostics.Append(diags...) - state.EnterpriseSearch, diags = flattenStackVersionEnterpriseSearchConfig(ctx, stack.EnterpriseSearch) + state.EnterpriseSearch, diags = flattenEnterpriseSearchConfig(ctx, stack.EnterpriseSearch) diagnostics.Append(diags...) - state.Kibana, diags = flattenStackVersionKibanaConfig(ctx, stack.Kibana) + state.Kibana, diags = flattenKibanaConfig(ctx, stack.Kibana) diagnostics.Append(diags...) return diagnostics diff --git a/ec/ecdatasource/stackdatasource/flatteners_apm.go b/ec/ecdatasource/stackdatasource/flatteners_apm.go index c4c14214d..a3bf992bf 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_apm.go +++ b/ec/ecdatasource/stackdatasource/flatteners_apm.go @@ -27,8 +27,8 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" ) -// flattenStackVersionApmConfig takes a StackVersionApmConfigs and flattens it. -func flattenStackVersionApmConfig(ctx context.Context, res *models.StackVersionApmConfig) (types.List, diag.Diagnostics) { +// flattenApmConfig takes a StackVersionApmConfigs and flattens it. 
+func flattenApmConfig(ctx context.Context, res *models.StackVersionApmConfig) (types.List, diag.Diagnostics) { var diags diag.Diagnostics model := newResourceKindConfigModelV0() diff --git a/ec/ecdatasource/stackdatasource/flatteners_apm_test.go b/ec/ecdatasource/stackdatasource/flatteners_apm_test.go index 4f5c8a77f..ffb2a3d0c 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_apm_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_apm_test.go @@ -70,7 +70,7 @@ func Test_flattenApmResource(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - apm, diags := flattenStackVersionApmConfig(context.Background(), tt.args.res) + apm, diags := flattenApmConfig(context.Background(), tt.args.res) assert.Empty(t, diags) var got []resourceKindConfigModelV0 diff --git a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go index 94bae2e54..820447afd 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go +++ b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go @@ -27,8 +27,8 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" ) -// flattenStackVersionElasticsearchConfig takes a StackVersionElasticsearchConfig and flattens it. -func flattenStackVersionElasticsearchConfig(ctx context.Context, res *models.StackVersionElasticsearchConfig) (types.List, diag.Diagnostics) { +// flattenElasticsearchConfig takes a StackVersionElasticsearchConfig and flattens it. 
+func flattenElasticsearchConfig(ctx context.Context, res *models.StackVersionElasticsearchConfig) (types.List, diag.Diagnostics) { var diags diag.Diagnostics model := newElasticsearchConfigModelV0() diff --git a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go index 590d94e92..dcb2f29d8 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go @@ -102,7 +102,7 @@ func Test_flattenElasticsearchResource(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - elasticsearch, diags := flattenStackVersionElasticsearchConfig(context.Background(), tt.args.res) + elasticsearch, diags := flattenElasticsearchConfig(context.Background(), tt.args.res) assert.Empty(t, diags) var got []elasticsearchConfigModelV0 diff --git a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go index f6e5a4ee5..34c6b737a 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go +++ b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go @@ -27,8 +27,8 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" ) -// flattenStackVersionEnterpriseSearchConfig takes a StackVersionEnterpriseSearchConfig and flattens it. -func flattenStackVersionEnterpriseSearchConfig(ctx context.Context, res *models.StackVersionEnterpriseSearchConfig) (types.List, diag.Diagnostics) { +// flattenEnterpriseSearchConfig takes a StackVersionEnterpriseSearchConfig and flattens it. 
+func flattenEnterpriseSearchConfig(ctx context.Context, res *models.StackVersionEnterpriseSearchConfig) (types.List, diag.Diagnostics) { var diags diag.Diagnostics model := newResourceKindConfigModelV0() diff --git a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go index d55a7a146..ced9bdeff 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go @@ -70,7 +70,7 @@ func Test_flattenEnterpriseSearchResources(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - enterpriseSearch, diags := flattenStackVersionEnterpriseSearchConfig(context.Background(), tt.args.res) + enterpriseSearch, diags := flattenEnterpriseSearchConfig(context.Background(), tt.args.res) assert.Empty(t, diags) var got []resourceKindConfigModelV0 diff --git a/ec/ecdatasource/stackdatasource/flatteners_kibana.go b/ec/ecdatasource/stackdatasource/flatteners_kibana.go index bdfd6721b..ad770b8dd 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_kibana.go +++ b/ec/ecdatasource/stackdatasource/flatteners_kibana.go @@ -27,8 +27,8 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/models" ) -// flattenStackVersionKibanaConfig takes a StackVersionKibanaConfig and flattens it. -func flattenStackVersionKibanaConfig(ctx context.Context, res *models.StackVersionKibanaConfig) (types.List, diag.Diagnostics) { +// flattenKibanaConfig takes a StackVersionKibanaConfig and flattens it. 
+func flattenKibanaConfig(ctx context.Context, res *models.StackVersionKibanaConfig) (types.List, diag.Diagnostics) { var diags diag.Diagnostics model := newResourceKindConfigModelV0() diff --git a/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go b/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go index 8ab97d2a2..266a28be5 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go @@ -70,7 +70,7 @@ func Test_flattenKibanaResources(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - kibana, diags := flattenStackVersionKibanaConfig(context.Background(), tt.args.res) + kibana, diags := flattenKibanaConfig(context.Background(), tt.args.res) assert.Empty(t, diags) var got []resourceKindConfigModelV0 From dda35864d280c73aa162fbcb5be8e63746f1dbe2 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Fri, 13 Jan 2023 17:43:24 +0100 Subject: [PATCH 078/104] move ResourceKind to util --- .../deploymentsdatasource/expanders_test.go | 36 ++++++++-------- .../deploymentsdatasource/schema.go | 42 ++++--------------- .../stackdatasource/datasource_test.go | 12 +++--- .../stackdatasource/flatteners_apm.go | 5 ++- .../flatteners_enterprise_search.go | 5 ++- .../stackdatasource/flatteners_kibana.go | 5 ++- ec/ecdatasource/stackdatasource/schema.go | 28 ++++--------- 7 files changed, 49 insertions(+), 84 deletions(-) diff --git a/ec/ecdatasource/deploymentsdatasource/expanders_test.go b/ec/ecdatasource/deploymentsdatasource/expanders_test.go index dc01f56e6..41ffc4851 100644 --- a/ec/ecdatasource/deploymentsdatasource/expanders_test.go +++ b/ec/ecdatasource/deploymentsdatasource/expanders_test.go @@ -72,9 +72,9 @@ func Test_expandFilters(t *testing.T) { Size: types.Int64{Value: 200}, Tags: util.StringMapAsType(map[string]string{"foo": "bar"}), Elasticsearch: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(Elasticsearch)}, + 
ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.ElasticsearchResourceKind)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(Elasticsearch), + AttrTypes: resourceFiltersAttrTypes(util.ElasticsearchResourceKind), Attrs: map[string]attr.Value{ "healthy": types.String{Null: true}, "status": types.String{Null: true}, @@ -83,9 +83,9 @@ func Test_expandFilters(t *testing.T) { }}, }, Kibana: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(Kibana)}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.KibanaResourceKind)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(Kibana), + AttrTypes: resourceFiltersAttrTypes(util.KibanaResourceKind), Attrs: map[string]attr.Value{ "healthy": types.String{Null: true}, "status": types.String{Value: "started"}, @@ -94,9 +94,9 @@ func Test_expandFilters(t *testing.T) { }}, }, Apm: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(Apm)}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.ApmResourceKind)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(Apm), + AttrTypes: resourceFiltersAttrTypes(util.ApmResourceKind), Attrs: map[string]attr.Value{ "healthy": types.String{Value: "true"}, "status": types.String{Null: true}, @@ -105,9 +105,9 @@ func Test_expandFilters(t *testing.T) { }}, }, EnterpriseSearch: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(EnterpriseSearch)}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.EnterpriseSearchResourceKind)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(EnterpriseSearch), + AttrTypes: resourceFiltersAttrTypes(util.EnterpriseSearchResourceKind), Attrs: map[string]attr.Value{ "status": types.String{Null: true}, "healthy": types.String{Value: "false"}, @@ -167,9 +167,9 @@ func newInvalidFilters() modelV0 { return modelV0{ Healthy: 
types.String{Value: "invalid value"}, Apm: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(Apm)}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.ApmResourceKind)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(Apm), + AttrTypes: resourceFiltersAttrTypes(util.ApmResourceKind), Attrs: map[string]attr.Value{ "healthy": types.String{Value: "invalid value"}, }, @@ -187,9 +187,9 @@ func newSampleFilters() modelV0 { "foo": types.String{Value: "bar"}, }}, Elasticsearch: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(Elasticsearch)}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.ElasticsearchResourceKind)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(Elasticsearch), + AttrTypes: resourceFiltersAttrTypes(util.ElasticsearchResourceKind), Attrs: map[string]attr.Value{ "healthy": types.String{Null: true}, "status": types.String{Null: true}, @@ -198,9 +198,9 @@ func newSampleFilters() modelV0 { }}, }, Kibana: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(Kibana)}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.KibanaResourceKind)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(Kibana), + AttrTypes: resourceFiltersAttrTypes(util.KibanaResourceKind), Attrs: map[string]attr.Value{ "healthy": types.String{Null: true}, "status": types.String{Value: "started"}, @@ -209,9 +209,9 @@ func newSampleFilters() modelV0 { }}, }, Apm: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(Apm)}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.ApmResourceKind)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(Apm), + AttrTypes: resourceFiltersAttrTypes(util.ApmResourceKind), Attrs: map[string]attr.Value{ "healthy": types.String{Value: "true"}, "status": types.String{Null: true}, @@ -220,9 
+220,9 @@ func newSampleFilters() modelV0 { }}, }, EnterpriseSearch: types.List{ - ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(EnterpriseSearch)}, + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.EnterpriseSearchResourceKind)}, Elems: []attr.Value{types.Object{ - AttrTypes: resourceFiltersAttrTypes(EnterpriseSearch), + AttrTypes: resourceFiltersAttrTypes(util.EnterpriseSearchResourceKind), Attrs: map[string]attr.Value{ "status": types.String{Null: true}, "healthy": types.String{Value: "false"}, diff --git a/ec/ecdatasource/deploymentsdatasource/schema.go b/ec/ecdatasource/deploymentsdatasource/schema.go index ea7123f04..52e9fd724 100644 --- a/ec/ecdatasource/deploymentsdatasource/schema.go +++ b/ec/ecdatasource/deploymentsdatasource/schema.go @@ -27,16 +27,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" -) - -type ResourceKind int - -const ( - Apm ResourceKind = iota - Elasticsearch - EnterpriseSearch - IntegrationsServer - Kibana + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { @@ -87,11 +78,11 @@ func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnost }, Blocks: map[string]tfsdk.Block{ // Deployment resources - "elasticsearch": resourceFiltersSchema(Elasticsearch), - "kibana": resourceFiltersSchema(Kibana), - "apm": resourceFiltersSchema(Apm), - "integrations_server": resourceFiltersSchema(IntegrationsServer), - "enterprise_search": resourceFiltersSchema(EnterpriseSearch), + "elasticsearch": resourceFiltersSchema(util.ElasticsearchResourceKind), + "kibana": resourceFiltersSchema(util.KibanaResourceKind), + "apm": resourceFiltersSchema(util.ApmResourceKind), + "integrations_server": resourceFiltersSchema(util.IntegrationsServerResourceKind), + "enterprise_search": 
resourceFiltersSchema(util.EnterpriseSearchResourceKind), }, }, nil } @@ -175,24 +166,7 @@ func deploymentAttrTypes() map[string]attr.Type { } -func (rk ResourceKind) Name() string { - switch rk { - case Apm: - return "APM" - case Elasticsearch: - return "Elasticsearch" - case EnterpriseSearch: - return "Enterprise Search" - case IntegrationsServer: - return "Integrations Server" - case Kibana: - return "Kibana" - default: - return "unknown" - } -} - -func resourceFiltersSchema(resourceKind ResourceKind) tfsdk.Block { +func resourceFiltersSchema(resourceKind util.ResourceKind) tfsdk.Block { return tfsdk.Block{ Description: fmt.Sprintf("Filter by %s resource kind status or configuration.", resourceKind.Name()), NestingMode: tfsdk.BlockNestingModeList, @@ -213,7 +187,7 @@ func resourceFiltersSchema(resourceKind ResourceKind) tfsdk.Block { } } -func resourceFiltersAttrTypes(resourceKind ResourceKind) map[string]attr.Type { +func resourceFiltersAttrTypes(resourceKind util.ResourceKind) map[string]attr.Type { return resourceFiltersSchema(resourceKind).Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes } diff --git a/ec/ecdatasource/stackdatasource/datasource_test.go b/ec/ecdatasource/stackdatasource/datasource_test.go index 0d18680f0..17c7e345c 100644 --- a/ec/ecdatasource/stackdatasource/datasource_test.go +++ b/ec/ecdatasource/stackdatasource/datasource_test.go @@ -166,10 +166,10 @@ func newSampleStack() modelV0 { }, Kibana: types.List{ ElemType: types.ObjectType{ - AttrTypes: resourceKindConfigAttrTypes(Kibana), + AttrTypes: resourceKindConfigAttrTypes(util.KibanaResourceKind), }, Elems: []attr.Value{types.Object{ - AttrTypes: resourceKindConfigAttrTypes(Kibana), + AttrTypes: resourceKindConfigAttrTypes(util.KibanaResourceKind), Attrs: map[string]attr.Value{ "denylist": util.StringListAsType([]string{"some"}), "capacity_constraints_max": types.Int64{Value: 8192}, @@ -181,10 +181,10 @@ func newSampleStack() modelV0 { }, EnterpriseSearch: types.List{ 
ElemType: types.ObjectType{ - AttrTypes: resourceKindConfigAttrTypes(EnterpriseSearch), + AttrTypes: resourceKindConfigAttrTypes(util.EnterpriseSearchResourceKind), }, Elems: []attr.Value{types.Object{ - AttrTypes: resourceKindConfigAttrTypes(EnterpriseSearch), + AttrTypes: resourceKindConfigAttrTypes(util.EnterpriseSearchResourceKind), Attrs: map[string]attr.Value{ "denylist": util.StringListAsType([]string{"some"}), "capacity_constraints_max": types.Int64{Value: 8192}, @@ -196,10 +196,10 @@ func newSampleStack() modelV0 { }, Apm: types.List{ ElemType: types.ObjectType{ - AttrTypes: resourceKindConfigAttrTypes(Apm), + AttrTypes: resourceKindConfigAttrTypes(util.ApmResourceKind), }, Elems: []attr.Value{types.Object{ - AttrTypes: resourceKindConfigAttrTypes(Apm), + AttrTypes: resourceKindConfigAttrTypes(util.ApmResourceKind), Attrs: map[string]attr.Value{ "denylist": util.StringListAsType([]string{"some"}), "capacity_constraints_max": types.Int64{Value: 8192}, diff --git a/ec/ecdatasource/stackdatasource/flatteners_apm.go b/ec/ecdatasource/stackdatasource/flatteners_apm.go index a3bf992bf..e1ad0be79 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_apm.go +++ b/ec/ecdatasource/stackdatasource/flatteners_apm.go @@ -25,6 +25,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) // flattenApmConfig takes a StackVersionApmConfigs and flattens it. 
@@ -32,7 +33,7 @@ func flattenApmConfig(ctx context.Context, res *models.StackVersionApmConfig) (t var diags diag.Diagnostics model := newResourceKindConfigModelV0() - target := types.List{ElemType: resourceKindConfigSchema(Apm).FrameworkType().(types.ListType).ElemType} + target := types.List{ElemType: resourceKindConfigSchema(util.ApmResourceKind).FrameworkType().(types.ListType).ElemType} target.Null = true if res == nil { @@ -64,7 +65,7 @@ func flattenApmConfig(ctx context.Context, res *models.StackVersionApmConfig) (t return target, diags } - diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, resourceKindConfigSchema(Apm).FrameworkType(), &target)...) + diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, resourceKindConfigSchema(util.ApmResourceKind).FrameworkType(), &target)...) return target, diags } diff --git a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go index 34c6b737a..038400661 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go +++ b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go @@ -25,6 +25,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) // flattenEnterpriseSearchConfig takes a StackVersionEnterpriseSearchConfig and flattens it. 
@@ -32,7 +33,7 @@ func flattenEnterpriseSearchConfig(ctx context.Context, res *models.StackVersion var diags diag.Diagnostics model := newResourceKindConfigModelV0() - target := types.List{ElemType: resourceKindConfigSchema(EnterpriseSearch).FrameworkType().(types.ListType).ElemType} + target := types.List{ElemType: resourceKindConfigSchema(util.EnterpriseSearchResourceKind).FrameworkType().(types.ListType).ElemType} target.Null = true if res == nil { @@ -64,7 +65,7 @@ func flattenEnterpriseSearchConfig(ctx context.Context, res *models.StackVersion return target, diags } - diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, resourceKindConfigSchema(EnterpriseSearch).FrameworkType(), &target)...) + diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, resourceKindConfigSchema(util.EnterpriseSearchResourceKind).FrameworkType(), &target)...) return target, diags } diff --git a/ec/ecdatasource/stackdatasource/flatteners_kibana.go b/ec/ecdatasource/stackdatasource/flatteners_kibana.go index ad770b8dd..6d9b2a98c 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_kibana.go +++ b/ec/ecdatasource/stackdatasource/flatteners_kibana.go @@ -25,6 +25,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) // flattenKibanaConfig takes a StackVersionKibanaConfig and flattens it. 
@@ -32,7 +33,7 @@ func flattenKibanaConfig(ctx context.Context, res *models.StackVersionKibanaConf var diags diag.Diagnostics model := newResourceKindConfigModelV0() - target := types.List{ElemType: resourceKindConfigSchema(Kibana).FrameworkType().(types.ListType).ElemType} + target := types.List{ElemType: resourceKindConfigSchema(util.KibanaResourceKind).FrameworkType().(types.ListType).ElemType} target.Null = true if res == nil { @@ -64,7 +65,7 @@ func flattenKibanaConfig(ctx context.Context, res *models.StackVersionKibanaConf return target, diags } - diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, resourceKindConfigSchema(Kibana).FrameworkType(), &target)...) + diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, resourceKindConfigSchema(util.KibanaResourceKind).FrameworkType(), &target)...) return target, diags } diff --git a/ec/ecdatasource/stackdatasource/schema.go b/ec/ecdatasource/stackdatasource/schema.go index 9a9ea3b3b..042b9851d 100644 --- a/ec/ecdatasource/stackdatasource/schema.go +++ b/ec/ecdatasource/stackdatasource/schema.go @@ -21,6 +21,7 @@ import ( "context" "fmt" + "github.com/elastic/terraform-provider-ec/ec/internal/util" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" @@ -28,14 +29,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) -type ResourceKind int - -const ( - Apm ResourceKind = iota - EnterpriseSearch - Kibana -) - func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { return tfsdk.Schema{ Attributes: map[string]tfsdk.Attribute{ @@ -78,10 +71,10 @@ func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnost Type: types.BoolType, Computed: true, }, - "apm": resourceKindConfigSchema(Apm), - "enterprise_search": resourceKindConfigSchema(EnterpriseSearch), + "apm": 
resourceKindConfigSchema(util.ApmResourceKind), + "enterprise_search": resourceKindConfigSchema(util.EnterpriseSearchResourceKind), "elasticsearch": elasticsearchConfigSchema(), - "kibana": resourceKindConfigSchema(Kibana), + "kibana": resourceKindConfigSchema(util.KibanaResourceKind), }, }, nil } @@ -138,14 +131,9 @@ func elasticsearchConfigAttrTypes() map[string]attr.Type { return elasticsearchConfigSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes } -func resourceKindConfigSchema(resourceKind ResourceKind) tfsdk.Attribute { - var names = map[ResourceKind]string{ - Apm: "APM", - EnterpriseSearch: "Enterprise Search", - Kibana: "Kibana", - } +func resourceKindConfigSchema(resourceKind util.ResourceKind) tfsdk.Attribute { return tfsdk.Attribute{ - Description: fmt.Sprintf("Information for %s workloads on this stack version.", names[resourceKind]), + Description: fmt.Sprintf("Information for %s workloads on this stack version.", resourceKind.Name()), Computed: true, Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ @@ -171,7 +159,7 @@ func resourceKindConfigSchema(resourceKind ResourceKind) tfsdk.Attribute { }, "docker_image": { Type: types.StringType, - Description: fmt.Sprintf("Docker image to use for the %s instance.", names[resourceKind]), + Description: fmt.Sprintf("Docker image to use for the %s instance.", resourceKind.Name()), Computed: true, }, // node_types not added. 
It is highly unlikely they will be used @@ -181,7 +169,7 @@ func resourceKindConfigSchema(resourceKind ResourceKind) tfsdk.Attribute { } } -func resourceKindConfigAttrTypes(resourceKind ResourceKind) map[string]attr.Type { +func resourceKindConfigAttrTypes(resourceKind util.ResourceKind) map[string]attr.Type { return resourceKindConfigSchema(resourceKind).Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes } From 2b07835769af80dc83878722587d69fed2ecf760 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Fri, 13 Jan 2023 17:51:48 +0100 Subject: [PATCH 079/104] Replace any interface arg with returned type in trafficfilterresource --- ec/ecresource/trafficfilterresource/flatteners.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/ec/ecresource/trafficfilterresource/flatteners.go b/ec/ecresource/trafficfilterresource/flatteners.go index d44c24a9e..6cc1338a5 100644 --- a/ec/ecresource/trafficfilterresource/flatteners.go +++ b/ec/ecresource/trafficfilterresource/flatteners.go @@ -28,14 +28,13 @@ import ( ) func modelToState(ctx context.Context, res *models.TrafficFilterRulesetInfo, state *modelV0) diag.Diagnostics { - var diags diag.Diagnostics - state.Name = types.String{Value: *res.Name} state.Region = types.String{Value: *res.Region} state.Type = types.String{Value: *res.Type} state.IncludeByDefault = types.Bool{Value: *res.IncludeByDefault} - diags.Append(flattenRules(ctx, res.Rules, &state.Rule)...) 
+ var diags diag.Diagnostics + state.Rule, diags = flattenRules(ctx, res.Rules) if res.Description == "" { state.Description = types.String{Null: true} @@ -46,8 +45,7 @@ func modelToState(ctx context.Context, res *models.TrafficFilterRulesetInfo, sta return diags } -func flattenRules(ctx context.Context, rules []*models.TrafficFilterRule, target interface{}) diag.Diagnostics { - var diags diag.Diagnostics +func flattenRules(ctx context.Context, rules []*models.TrafficFilterRule) (types.Set, diag.Diagnostics) { var result = make([]trafficFilterRuleModelV0, 0, len(rules)) for _, rule := range rules { model := trafficFilterRuleModelV0{ @@ -77,7 +75,9 @@ func flattenRules(ctx context.Context, rules []*models.TrafficFilterRule, target result = append(result, model) } - diags.Append(tfsdk.ValueFrom(ctx, result, trafficFilterRuleSetType(), target)...) + target := types.Set{ElemType: trafficFilterRuleSetType().(types.SetType).ElementType()} - return diags + diags := tfsdk.ValueFrom(ctx, result, trafficFilterRuleSetType(), &target) + + return target, diags } From b30c20740553c9c670aec2790a856231feafe504 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Fri, 13 Jan 2023 17:52:16 +0100 Subject: [PATCH 080/104] add missing file --- ec/internal/util/resource_kind.go | 45 +++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 ec/internal/util/resource_kind.go diff --git a/ec/internal/util/resource_kind.go b/ec/internal/util/resource_kind.go new file mode 100644 index 000000000..7658dcd3a --- /dev/null +++ b/ec/internal/util/resource_kind.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package util + +type ResourceKind int + +const ( + ApmResourceKind ResourceKind = iota + ElasticsearchResourceKind + EnterpriseSearchResourceKind + IntegrationsServerResourceKind + KibanaResourceKind +) + +func (rk ResourceKind) Name() string { + switch rk { + case ApmResourceKind: + return "APM" + case ElasticsearchResourceKind: + return "Elasticsearch" + case EnterpriseSearchResourceKind: + return "Enterprise Search" + case IntegrationsServerResourceKind: + return "Integrations Server" + case KibanaResourceKind: + return "Kibana" + default: + return "unknown" + } +} From 42fd243eb1b5c5e457d72a86b3f3dd5b9ec8b938 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Fri, 13 Jan 2023 19:43:18 +0100 Subject: [PATCH 081/104] Update NOTICE --- NOTICE | 150 ++++++++++++++++++++++++++++----------------------------- 1 file changed, 74 insertions(+), 76 deletions(-) diff --git a/NOTICE b/NOTICE index cc2f905aa..1f541e7fe 100755 --- a/NOTICE +++ b/NOTICE @@ -1,85 +1,83 @@ terraform-provider-ec -Copyright 2022 Elasticsearch B.V. +Copyright 2022-2023 Elasticsearch B.V. This product includes software developed at Elasticsearch B.V. and third-party software developed by the licenses listed below. 
========================================================================= -github.com/davecgh/go-spew 0BSD -github.com/agext/levenshtein Apache-2.0 -github.com/elastic/cloud-sdk-go Apache-2.0 -github.com/go-logr/logr Apache-2.0 -github.com/go-logr/stdr Apache-2.0 -github.com/go-openapi/analysis Apache-2.0 -github.com/go-openapi/errors Apache-2.0 -github.com/go-openapi/jsonpointer Apache-2.0 -github.com/go-openapi/jsonreference Apache-2.0 -github.com/go-openapi/loads Apache-2.0 -github.com/go-openapi/runtime Apache-2.0 -github.com/go-openapi/spec Apache-2.0 -github.com/go-openapi/strfmt Apache-2.0 -github.com/go-openapi/swag Apache-2.0 -github.com/go-openapi/validate Apache-2.0 -github.com/oklog/run Apache-2.0 -github.com/oklog/ulid Apache-2.0 -github.com/opentracing/opentracing-go Apache-2.0 -go.mongodb.org/mongo-driver Apache-2.0 -go.opentelemetry.io/otel/trace Apache-2.0 -go.opentelemetry.io/otel Apache-2.0 -google.golang.org/appengine Apache-2.0 -google.golang.org/genproto Apache-2.0 -google.golang.org/grpc Apache-2.0 -gopkg.in/yaml.v2 Apache-2.0 -gopkg.in/yaml.v3 Apache-2.0 -github.com/vmihailenco/msgpack/v4 BSD-2-Clause -github.com/vmihailenco/msgpack BSD-2-Clause -github.com/vmihailenco/tagparser BSD-2-Clause -github.com/puerkitobio/purell BSD-3-Clause -github.com/puerkitobio/urlesc BSD-3-Clause -github.com/golang/protobuf BSD-3-Clause -github.com/google/go-cmp BSD-3-Clause -github.com/pmezard/go-difflib BSD-3-Clause -golang.org/x/crypto BSD-3-Clause -golang.org/x/net BSD-3-Clause -golang.org/x/sys BSD-3-Clause -golang.org/x/text BSD-3-Clause -google.golang.org/protobuf BSD-3-Clause -github.com/apparentlymart/go-cidr MIT -github.com/asaskevich/govalidator MIT -github.com/blang/semver/v4 MIT -github.com/fatih/color MIT -github.com/hashicorp/go-cty MIT -github.com/hashicorp/go-hclog MIT -github.com/josharian/intern MIT -github.com/mailru/easyjson MIT -github.com/mattn/go-colorable MIT -github.com/mattn/go-isatty MIT -github.com/mitchellh/copystructure MIT 
-github.com/mitchellh/go-testing-interface MIT -github.com/mitchellh/go-wordwrap MIT -github.com/mitchellh/mapstructure MIT -github.com/mitchellh/reflectwalk MIT -github.com/stretchr/testify MIT -github.com/zclconf/go-cty MIT -github.com/hashicorp/errwrap MPL-2.0 -github.com/hashicorp/go-multierror MPL-2.0 -github.com/hashicorp/go-uuid MPL-2.0 -github.com/hashicorp/hcl/v2 MPL-2.0 -github.com/hashicorp/logutils MPL-2.0 -github.com/hashicorp/terraform-exec MPL-2.0 -github.com/hashicorp/terraform-json MPL-2.0 -github.com/hashicorp/terraform-plugin-go MPL-2.0 -github.com/hashicorp/terraform-registry-address MPL-2.0 -github.com/hashicorp/yamux MPL-2.0 -github.com/hashicorp/go-checkpoint MPL-2.0-no-copyleft-exception -github.com/hashicorp/go-cleanhttp MPL-2.0-no-copyleft-exception -github.com/hashicorp/go-plugin MPL-2.0-no-copyleft-exception -github.com/hashicorp/go-version MPL-2.0-no-copyleft-exception -github.com/hashicorp/hc-install MPL-2.0-no-copyleft-exception -github.com/hashicorp/terraform-plugin-log MPL-2.0-no-copyleft-exception -github.com/hashicorp/terraform-plugin-sdk/v2 MPL-2.0-no-copyleft-exception -github.com/hashicorp/terraform-svchost MPL-2.0-no-copyleft-exception -github.com/apparentlymart/go-textseg/v13 Unicode-TOU +github.com/davecgh/go-spew 0BSD +github.com/agext/levenshtein Apache-2.0 +github.com/apparentlymart/go-textseg/v13 Apache-2.0 +github.com/elastic/cloud-sdk-go Apache-2.0 +github.com/go-openapi/analysis Apache-2.0 +github.com/go-openapi/errors Apache-2.0 +github.com/go-openapi/jsonpointer Apache-2.0 +github.com/go-openapi/jsonreference Apache-2.0 +github.com/go-openapi/loads Apache-2.0 +github.com/go-openapi/runtime Apache-2.0 +github.com/go-openapi/spec Apache-2.0 +github.com/go-openapi/strfmt Apache-2.0 +github.com/go-openapi/swag Apache-2.0 +github.com/go-openapi/validate Apache-2.0 +github.com/oklog/run Apache-2.0 +github.com/oklog/ulid Apache-2.0 +github.com/opentracing/opentracing-go Apache-2.0 +go.mongodb.org/mongo-driver Apache-2.0 
+google.golang.org/appengine Apache-2.0 +google.golang.org/genproto Apache-2.0 +google.golang.org/grpc Apache-2.0 +gopkg.in/yaml.v2 Apache-2.0 +gopkg.in/yaml.v3 Apache-2.0 +github.com/vmihailenco/msgpack/v4 BSD-2-Clause +github.com/vmihailenco/msgpack BSD-2-Clause +github.com/vmihailenco/tagparser BSD-2-Clause +github.com/golang/protobuf BSD-3-Clause +github.com/google/go-cmp BSD-3-Clause +github.com/pmezard/go-difflib BSD-3-Clause +golang.org/x/crypto BSD-3-Clause +golang.org/x/exp BSD-3-Clause +golang.org/x/net BSD-3-Clause +golang.org/x/sys BSD-3-Clause +golang.org/x/text BSD-3-Clause +google.golang.org/protobuf BSD-3-Clause +github.com/apparentlymart/go-cidr MIT +github.com/asaskevich/govalidator MIT +github.com/blang/semver/v4 MIT +github.com/blang/semver MIT +github.com/fatih/color MIT +github.com/hashicorp/go-cty MIT +github.com/hashicorp/go-hclog MIT +github.com/josharian/intern MIT +github.com/mailru/easyjson MIT +github.com/mattn/go-colorable MIT +github.com/mattn/go-isatty MIT +github.com/mitchellh/copystructure MIT +github.com/mitchellh/go-testing-interface MIT +github.com/mitchellh/go-wordwrap MIT +github.com/mitchellh/mapstructure MIT +github.com/mitchellh/reflectwalk MIT +github.com/stretchr/testify MIT +github.com/zclconf/go-cty MIT +github.com/hashicorp/errwrap MPL-2.0 +github.com/hashicorp/go-checkpoint MPL-2.0 +github.com/hashicorp/go-plugin MPL-2.0 +github.com/hashicorp/go-uuid MPL-2.0 +github.com/hashicorp/hc-install MPL-2.0 +github.com/hashicorp/logutils MPL-2.0 +github.com/hashicorp/terraform-plugin-framework-validators MPL-2.0 +github.com/hashicorp/terraform-plugin-sdk/v2 MPL-2.0 +github.com/hashicorp/terraform-registry-address MPL-2.0 +github.com/hashicorp/terraform-svchost MPL-2.0 +github.com/hashicorp/yamux MPL-2.0 +github.com/hashicorp/go-cleanhttp MPL-2.0-no-copyleft-exception +github.com/hashicorp/go-multierror MPL-2.0-no-copyleft-exception +github.com/hashicorp/go-version MPL-2.0-no-copyleft-exception +github.com/hashicorp/hcl/v2 
MPL-2.0-no-copyleft-exception +github.com/hashicorp/terraform-exec MPL-2.0-no-copyleft-exception +github.com/hashicorp/terraform-json MPL-2.0-no-copyleft-exception +github.com/hashicorp/terraform-plugin-framework MPL-2.0-no-copyleft-exception +github.com/hashicorp/terraform-plugin-go MPL-2.0-no-copyleft-exception +github.com/hashicorp/terraform-plugin-log MPL-2.0-no-copyleft-exception ========================================================================= From c60b15c5c76e0e750fb239808a6771c0f148e003 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Fri, 13 Jan 2023 21:05:51 +0100 Subject: [PATCH 082/104] Update NOTICE --- NOTICE | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/NOTICE b/NOTICE index 1f541e7fe..e8817dccb 100644 --- a/NOTICE +++ b/NOTICE @@ -64,20 +64,20 @@ github.com/hashicorp/go-checkpoint MPL-2.0 github.com/hashicorp/go-plugin MPL-2.0 github.com/hashicorp/go-uuid MPL-2.0 github.com/hashicorp/hc-install MPL-2.0 -github.com/hashicorp/logutils MPL-2.0 +github.com/hashicorp/terraform-exec MPL-2.0 +github.com/hashicorp/terraform-json MPL-2.0 github.com/hashicorp/terraform-plugin-framework-validators MPL-2.0 github.com/hashicorp/terraform-plugin-sdk/v2 MPL-2.0 -github.com/hashicorp/terraform-registry-address MPL-2.0 github.com/hashicorp/terraform-svchost MPL-2.0 -github.com/hashicorp/yamux MPL-2.0 github.com/hashicorp/go-cleanhttp MPL-2.0-no-copyleft-exception github.com/hashicorp/go-multierror MPL-2.0-no-copyleft-exception github.com/hashicorp/go-version MPL-2.0-no-copyleft-exception github.com/hashicorp/hcl/v2 MPL-2.0-no-copyleft-exception -github.com/hashicorp/terraform-exec MPL-2.0-no-copyleft-exception -github.com/hashicorp/terraform-json MPL-2.0-no-copyleft-exception +github.com/hashicorp/logutils MPL-2.0-no-copyleft-exception github.com/hashicorp/terraform-plugin-framework MPL-2.0-no-copyleft-exception github.com/hashicorp/terraform-plugin-go MPL-2.0-no-copyleft-exception 
github.com/hashicorp/terraform-plugin-log MPL-2.0-no-copyleft-exception +github.com/hashicorp/terraform-registry-address MPL-2.0-no-copyleft-exception +github.com/hashicorp/yamux MPL-2.0-no-copyleft-exception ========================================================================= From d3263ff4a3e804a933419db02ce96310809e53ba Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Mon, 16 Jan 2023 11:23:05 +0100 Subject: [PATCH 083/104] Fixing description for Enterprise Search --- .../deploymentdatasource/schema_enterprise_search.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go b/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go index a59c5440f..f3adfc391 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go +++ b/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go @@ -37,17 +37,17 @@ func enterpriseSearchResourceInfoSchema() tfsdk.Attribute { }, "healthy": { Type: types.BoolType, - Description: "Resource kind health status.", + Description: "Enterprise Search resource health status.", Computed: true, }, "http_endpoint": { Type: types.StringType, - Description: "HTTP endpoint for the resource kind.", + Description: "HTTP endpoint for the Enterprise Search resource.", Computed: true, }, "https_endpoint": { Type: types.StringType, - Description: "HTTPS endpoint for the resource kind.", + Description: "HTTPS endpoint for the Enterprise Search resource.", Computed: true, }, "ref_id": { @@ -62,7 +62,7 @@ func enterpriseSearchResourceInfoSchema() tfsdk.Attribute { }, "status": { Type: types.StringType, - Description: "Resource kind status (for example, \"started\", \"stopped\", etc).", + Description: "Enterprise Search resource status (for example, \"started\", \"stopped\", etc).", Computed: true, }, "version": { From 0d104c25b5a533ffefe2e01589da53e4389be5de Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 18 Jan 
2023 07:53:29 +0100 Subject: [PATCH 084/104] (WIP) use types.Set for topologies (unit tests fails) --- .../v2/deployment_create_payload_test.go | 220 ++++++------ .../deployment/v2/deployment_read.go | 53 +-- .../deployment/v2/deployment_read_test.go | 167 +++++---- .../v2/deployment_update_payload_test.go | 216 ++++++------ .../elasticsearch/v2/elasticsearch_payload.go | 68 ++-- .../v2/elasticsearch_payload_test.go | 316 ++++++++++-------- .../elasticsearch/v2/elasticsearch_read.go | 62 +--- .../v2/elasticsearch_read_test.go | 56 ++-- .../v2/elasticsearch_test_utils.go | 2 +- .../v2/elasticsearch_topology.go | 27 +- .../v2/node_roles_plan_modifier_test.go | 13 +- .../elasticsearch/v2/node_roles_test.go | 14 +- .../v2/node_types_plan_modifier_test.go | 13 +- .../elasticsearch/v2/schema.go | 72 +--- .../v2/topology_plan_modifier_test.go | 58 ++-- ec/ecresource/deploymentresource/read.go | 2 +- 16 files changed, 628 insertions(+), 731 deletions(-) diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go index 1e9b411d5..327c0dae1 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go @@ -64,7 +64,9 @@ func Test_createRequest(t *testing.T) { ) defaultElasticsearch := &elasticsearchv2.Elasticsearch{ - HotTier: defaultHotTier, + Topology: elasticsearchv2.ElasticsearchTopologies{ + *defaultHotTier, + }, } sampleKibana := &kibanav2.Kibana{ @@ -126,9 +128,9 @@ func Test_createRequest(t *testing.T) { UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), }, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("2g"), 
NodeRoles: []string{ "master", @@ -141,10 +143,8 @@ func Test_createRequest(t *testing.T) { ZoneCount: 1, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - WarmTier: elasticsearchv2.CreateTierForTest( - "warm", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "warm", Size: ec.String("2g"), NodeRoles: []string{ "data_warm", @@ -153,7 +153,7 @@ func Test_createRequest(t *testing.T) { ZoneCount: 1, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: sampleKibana, Apm: sampleApm, @@ -172,9 +172,9 @@ func Test_createRequest(t *testing.T) { UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), }, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("2g"), NodeTypeData: ec.String("true"), @@ -184,7 +184,7 @@ func Test_createRequest(t *testing.T) { ZoneCount: 1, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, } sampleLegacyDeployment := Deployment{ @@ -889,13 +889,13 @@ func Test_createRequest(t *testing.T) { Version: "7.7.0", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("4g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ RefId: ec.String("main-kibana"), @@ -1050,8 +1050,10 @@ func Test_createRequest(t *testing.T) { Region: "us-east-1", Version: "7.7.0", Elasticsearch: &elasticsearchv2.Elasticsearch{ - RefId: ec.String("main-elasticsearch"), - HotTier: defaultHotTier, + RefId: 
ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + *defaultHotTier, + }, }, Kibana: &kibanav2.Kibana{ RefId: ec.String("main-kibana"), @@ -1448,8 +1450,10 @@ func Test_createRequest(t *testing.T) { Region: "us-east-1", Version: "7.12.0", Elasticsearch: &elasticsearchv2.Elasticsearch{ - RefId: ec.String("main-elasticsearch"), - HotTier: defaultHotTier, + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + *defaultHotTier, + }, Extension: elasticsearchv2.ElasticsearchExtensions{ { Name: "my-plugin", @@ -1603,27 +1607,23 @@ func Test_createRequest(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Autoscale: ec.Bool(true), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - ColdTier: elasticsearchv2.CreateTierForTest( - "cold", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "cold", Size: ec.String("2g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - WarmTier: elasticsearchv2.CreateTierForTest( - "warm", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "warm", Size: ec.String("4g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, client: api.NewMock(mock.New200Response(ioOptimizedTpl())), @@ -1755,31 +1755,27 @@ func Test_createRequest(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Autoscale: ec.Bool(true), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ MaxSize: 
ec.String("232g"), }, }, - ), - ColdTier: elasticsearchv2.CreateTierForTest( - "cold", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "cold", Size: ec.String("2g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - WarmTier: elasticsearchv2.CreateTierForTest( - "warm", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "warm", Size: ec.String("4g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("116g"), }, }, - ), + }, }, }, client: api.NewMock(mock.New200Response(ioOptimizedTpl())), @@ -1910,35 +1906,29 @@ func Test_createRequest(t *testing.T) { Version: "7.12.0", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - ColdTier: elasticsearchv2.CreateTierForTest( - "cold", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "cold", Size: ec.String("2g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - WarmTier: elasticsearchv2.CreateTierForTest( - "warm", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "warm", Size: ec.String("4g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - MasterTier: elasticsearchv2.CreateTierForTest( - "master", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "master", Size: ec.String("1g"), ZoneCount: 3, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, client: api.NewMock(mock.New200Response(ioOptimizedTpl())), @@ -2088,35 +2078,29 @@ func Test_createRequest(t *testing.T) { Version: "7.12.0", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - 
elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - ColdTier: elasticsearchv2.CreateTierForTest( - "cold", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "cold", Size: ec.String("2g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - WarmTier: elasticsearchv2.CreateTierForTest( - "warm", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "warm", Size: ec.String("4g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - CoordinatingTier: elasticsearchv2.CreateTierForTest( - "coordinating", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "coordinating", Size: ec.String("2g"), ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, client: api.NewMock(mock.New200Response(ioOptimizedTpl())), @@ -2266,43 +2250,35 @@ func Test_createRequest(t *testing.T) { Version: "7.12.0", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - ColdTier: elasticsearchv2.CreateTierForTest( - "cold", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "cold", Size: ec.String("2g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - WarmTier: elasticsearchv2.CreateTierForTest( - "warm", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "warm", Size: ec.String("4g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - CoordinatingTier: elasticsearchv2.CreateTierForTest( - "coordinating", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "coordinating", Size: ec.String("2g"), 
ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - MasterTier: elasticsearchv2.CreateTierForTest( - "master", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "master", Size: ec.String("1g"), ZoneCount: 3, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, client: api.NewMock(mock.New200Response(ioOptimizedTpl())), @@ -2481,13 +2457,13 @@ func Test_createRequest(t *testing.T) { TrustAll: ec.Bool(true), }, }, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ RefId: ec.String("main-kibana"), @@ -2677,31 +2653,27 @@ func Test_createRequest(t *testing.T) { TrustAllowlist: []string{"abc", "dfg"}, }, }, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("232g"), }, }, - ), - ColdTier: elasticsearchv2.CreateTierForTest( - "cold", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "cold", Size: ec.String("2g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - WarmTier: elasticsearchv2.CreateTierForTest( - "warm", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "warm", Size: ec.String("4g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("116g"), }, }, - ), + }, }, }, client: api.NewMock(mock.New200Response(ioOptimizedTpl())), @@ -2859,8 +2831,10 @@ func Test_createRequest(t *testing.T) { Region: "us-east-1", Version: "7.9.2", Elasticsearch: &elasticsearchv2.Elasticsearch{ - RefId: ec.String("main-elasticsearch"), - HotTier: defaultHotTier, + 
RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + *defaultHotTier, + }, }, Kibana: &kibanav2.Kibana{ RefId: ec.String("main-kibana"), @@ -2946,13 +2920,13 @@ func Test_createRequest(t *testing.T) { Version: "7.10.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Tags: map[string]string{ "aaa": "bbb", @@ -3035,13 +3009,13 @@ func Test_createRequest(t *testing.T) { SnapshotSource: &elasticsearchv2.ElasticsearchSnapshotSource{ SourceElasticsearchClusterId: "8c63b87af9e24ea49b8a4bfe550e5fe9", }, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, client: api.NewMock(mock.New200Response(ioOptimizedTpl())), diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go index 2c0040928..45508ac0a 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go @@ -35,7 +35,7 @@ import ( "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" - "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/diag" ) type Deployment struct { @@ -60,43 +60,50 @@ type Deployment struct { } // Nullify Elasticsearch 
topologies that have zero size and are not specified in plan -func (dep *Deployment) NullifyUnusedEsTopologies(ctx context.Context, esPlan *elasticsearchv2.ElasticsearchTF) { +func (dep *Deployment) NullifyUnusedEsTopologies(ctx context.Context, esPlan *elasticsearchv2.ElasticsearchTF) diag.Diagnostics { if dep.Elasticsearch == nil { - return + return nil } if esPlan == nil { - return + return nil } - dep.Elasticsearch.HotTier = nullifyUnspecifiedZeroSizedTier(esPlan.HotContentTier, dep.Elasticsearch.HotTier) - - dep.Elasticsearch.WarmTier = nullifyUnspecifiedZeroSizedTier(esPlan.WarmTier, dep.Elasticsearch.WarmTier) - - dep.Elasticsearch.ColdTier = nullifyUnspecifiedZeroSizedTier(esPlan.ColdTier, dep.Elasticsearch.ColdTier) - - dep.Elasticsearch.FrozenTier = nullifyUnspecifiedZeroSizedTier(esPlan.FrozenTier, dep.Elasticsearch.FrozenTier) - - dep.Elasticsearch.MlTier = nullifyUnspecifiedZeroSizedTier(esPlan.MlTier, dep.Elasticsearch.MlTier) + var planTopology elasticsearchv2.ElasticsearchTopologiesTF + if diags := esPlan.Topology.ElementsAs(ctx, planTopology, true); diags.HasError() { + return diags + } - dep.Elasticsearch.MasterTier = nullifyUnspecifiedZeroSizedTier(esPlan.MasterTier, dep.Elasticsearch.MasterTier) + planTopologiesSet := planTopology.AsSet() - dep.Elasticsearch.CoordinatingTier = nullifyUnspecifiedZeroSizedTier(esPlan.CoordinatingTier, dep.Elasticsearch.CoordinatingTier) -} + filteredTopologies := make(elasticsearchv2.ElasticsearchTopologies, len(dep.Elasticsearch.Topology)) -func nullifyUnspecifiedZeroSizedTier(tierPlan types.Object, tier *elasticsearchv2.ElasticsearchTopology) *elasticsearchv2.ElasticsearchTopology { + for _, tier := range dep.Elasticsearch.Topology { + planTier := planTopologiesSet[tier.Id] + size, err := converters.ParseTopologySize(tier.Size, tier.SizeResource) - if tierPlan.IsNull() && tier != nil { + if err != nil { + var diags diag.Diagnostics + diags.AddError("Cannot remove unused Elasticsearch topologies from backend 
response", err.Error()) + return diags + } - size, err := converters.ParseTopologySize(tier.Size, tier.SizeResource) + if size == nil || size.Value == nil { + var diags diag.Diagnostics + diags.AddError("Cannot remove unused Elasticsearch topologies from backend response", fmt.Sprintf("the topology [%s] size is nil", tier.Id)) + return diags + } - // we can ignore returning an error here - it's handled in readers - if err == nil && size != nil && size.Value != nil && *size.Value == 0 { - tier = nil + if planTier == nil && *size.Value == 0 { + continue } + + filteredTopologies = append(filteredTopologies, tier) } - return tier + dep.Elasticsearch.Topology = filteredTopologies + + return nil } func ReadDeployment(res *models.DeploymentGetResponse, remotes *models.RemoteResources, deploymentResources []*models.DeploymentResource) (*Deployment, error) { diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go index d70fe9c48..a49e2644f 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go @@ -62,9 +62,9 @@ func Test_readDeployment(t *testing.T) { UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), }, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("2g"), SizeResource: ec.String("memory"), @@ -75,7 +75,7 @@ func Test_readDeployment(t *testing.T) { ZoneCount: 1, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -434,9 +434,9 @@ func Test_readDeployment(t *testing.T) { 
UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), }, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("2g"), SizeResource: ec.String("memory"), @@ -447,7 +447,7 @@ func Test_readDeployment(t *testing.T) { ZoneCount: 1, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -483,9 +483,9 @@ func Test_readDeployment(t *testing.T) { CloudID: ec.String("up2d:somecloudID"), HttpEndpoint: ec.String("http://1238f19957874af69306787dca662154.eastus2.azure.elastic-cloud.com:9200"), HttpsEndpoint: ec.String("https://1238f19957874af69306787dca662154.eastus2.azure.elastic-cloud.com:9243"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", InstanceConfigurationId: ec.String("azure.data.highio.l32sv2"), Size: ec.String("4g"), SizeResource: ec.String("memory"), @@ -496,7 +496,7 @@ func Test_readDeployment(t *testing.T) { ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, Config: &elasticsearchv2.ElasticsearchConfig{}, }, Kibana: &kibanav2.Kibana{ @@ -545,9 +545,9 @@ func Test_readDeployment(t *testing.T) { HttpEndpoint: ec.String("http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200"), HttpsEndpoint: ec.String("https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { 
+ Id: "hot_content", InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("8g"), SizeResource: ec.String("memory"), @@ -558,7 +558,7 @@ func Test_readDeployment(t *testing.T) { ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -608,9 +608,9 @@ func Test_readDeployment(t *testing.T) { HttpEndpoint: ec.String("http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200"), HttpsEndpoint: ec.String("https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("8g"), SizeResource: ec.String("memory"), @@ -621,7 +621,7 @@ func Test_readDeployment(t *testing.T) { ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, Extension: elasticsearchv2.ElasticsearchExtensions{ { Name: "custom-bundle", @@ -754,14 +754,14 @@ func Test_readDeployment(t *testing.T) { RefId: ec.String("main-elasticsearch"), Region: ec.String("aws-eu-central-1"), Config: &elasticsearchv2.ElasticsearchConfig{}, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("4g"), SizeResource: ec.String("memory"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, TrustAccount: elasticsearchv2.ElasticsearchTrustAccounts{ { AccountId: ec.String("ANID"), @@ -840,14 +840,14 @@ func Test_readDeployment(t *testing.T) { RefId: ec.String("main-elasticsearch"), Region: ec.String("aws-eu-central-1"), Config: 
&elasticsearchv2.ElasticsearchConfig{}, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("4g"), SizeResource: ec.String("memory"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, }, @@ -988,15 +988,15 @@ func Test_readDeployment(t *testing.T) { Config: &elasticsearchv2.ElasticsearchConfig{ DockerImage: ec.String("docker.elastic.com/elasticsearch/cloud:7.14.1-hash"), }, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("4g"), SizeResource: ec.String("memory"), ZoneCount: 1, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ RefId: ec.String("main-kibana"), @@ -1065,9 +1065,9 @@ func Test_readDeployment(t *testing.T) { HttpEndpoint: ec.String("http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200"), HttpsEndpoint: ec.String("https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("8g"), SizeResource: ec.String("memory"), @@ -1078,7 +1078,7 @@ func Test_readDeployment(t *testing.T) { ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -1126,9 +1126,9 @@ func Test_readDeployment(t *testing.T) { HttpEndpoint: ec.String("http://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9200"), HttpsEndpoint: 
ec.String("https://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", InstanceConfigurationId: ec.String("gcp.data.highio.1"), Size: ec.String("8g"), SizeResource: ec.String("memory"), @@ -1139,7 +1139,7 @@ func Test_readDeployment(t *testing.T) { ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -1187,9 +1187,9 @@ func Test_readDeployment(t *testing.T) { HttpEndpoint: ec.String("http://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9200"), HttpsEndpoint: ec.String("https://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", InstanceConfigurationId: ec.String("gcp.data.highio.1"), Size: ec.String("8g"), SizeResource: ec.String("memory"), @@ -1204,10 +1204,8 @@ func Test_readDeployment(t *testing.T) { PolicyOverrideJson: ec.String(`{"proactive_storage":{"forecast_window":"3 h"}}`), }, }, - ), - MlTier: elasticsearchv2.CreateTierForTest( - "ml", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "ml", InstanceConfigurationId: ec.String("gcp.ml.1"), Size: ec.String("1g"), SizeResource: ec.String("memory"), @@ -1223,10 +1221,8 @@ func Test_readDeployment(t *testing.T) { MinSizeResource: ec.String("memory"), }, }, - ), - MasterTier: elasticsearchv2.CreateTierForTest( - "master", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "master", InstanceConfigurationId: ec.String("gcp.master.1"), Size: ec.String("0g"), 
SizeResource: ec.String("memory"), @@ -1237,10 +1233,8 @@ func Test_readDeployment(t *testing.T) { ZoneCount: 3, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - CoordinatingTier: elasticsearchv2.CreateTierForTest( - "coordinating", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "coordinating", InstanceConfigurationId: ec.String("gcp.coordinating.1"), Size: ec.String("0g"), SizeResource: ec.String("memory"), @@ -1251,7 +1245,7 @@ func Test_readDeployment(t *testing.T) { ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -1298,9 +1292,9 @@ func Test_readDeployment(t *testing.T) { HttpEndpoint: ec.String("http://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9200"), HttpsEndpoint: ec.String("https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", InstanceConfigurationId: ec.String("gcp.data.highio.1"), Size: ec.String("4g"), SizeResource: ec.String("memory"), @@ -1311,10 +1305,9 @@ func Test_readDeployment(t *testing.T) { ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - WarmTier: elasticsearchv2.CreateTierForTest( - "warm", - elasticsearchv2.ElasticsearchTopology{ + { + + Id: "warm", InstanceConfigurationId: ec.String("gcp.data.highstorage.1"), Size: ec.String("4g"), SizeResource: ec.String("memory"), @@ -1325,10 +1318,8 @@ func Test_readDeployment(t *testing.T) { ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - CoordinatingTier: elasticsearchv2.CreateTierForTest( - "coordinating", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "coordinating", 
InstanceConfigurationId: ec.String("gcp.coordinating.1"), Size: ec.String("0g"), SizeResource: ec.String("memory"), @@ -1339,7 +1330,7 @@ func Test_readDeployment(t *testing.T) { ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -1386,9 +1377,9 @@ func Test_readDeployment(t *testing.T) { HttpEndpoint: ec.String("http://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9200"), HttpsEndpoint: ec.String("https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", InstanceConfigurationId: ec.String("gcp.data.highio.1"), Size: ec.String("4g"), SizeResource: ec.String("memory"), @@ -1403,10 +1394,8 @@ func Test_readDeployment(t *testing.T) { }, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - WarmTier: elasticsearchv2.CreateTierForTest( - "warm", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "warm", InstanceConfigurationId: ec.String("gcp.data.highstorage.1"), Size: ec.String("4g"), SizeResource: ec.String("memory"), @@ -1417,10 +1406,8 @@ func Test_readDeployment(t *testing.T) { }, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - MlTier: elasticsearchv2.CreateTierForTest( - "ml", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "ml", InstanceConfigurationId: ec.String("gcp.ml.1"), Size: ec.String("0g"), SizeResource: ec.String("memory"), @@ -1428,10 +1415,8 @@ func Test_readDeployment(t *testing.T) { NodeRoles: []string{"ml", "remote_cluster_client"}, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - MasterTier: elasticsearchv2.CreateTierForTest( - "master", - elasticsearchv2.ElasticsearchTopology{ 
+ { + Id: "master", InstanceConfigurationId: ec.String("gcp.master.1"), Size: ec.String("0g"), SizeResource: ec.String("memory"), @@ -1439,10 +1424,8 @@ func Test_readDeployment(t *testing.T) { NodeRoles: []string{"master"}, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - CoordinatingTier: elasticsearchv2.CreateTierForTest( - "coordinating", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "coordinating", InstanceConfigurationId: ec.String("gcp.coordinating.1"), Size: ec.String("0g"), SizeResource: ec.String("memory"), @@ -1450,7 +1433,7 @@ func Test_readDeployment(t *testing.T) { NodeRoles: []string{"ingest", "remote_cluster_client"}, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -1523,9 +1506,9 @@ func Test_readDeployment(t *testing.T) { RefId: ec.String("main-elasticsearch"), }, }, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", InstanceConfigurationId: ec.String("aws.ccs.r5d"), Size: ec.String("1g"), SizeResource: ec.String("memory"), @@ -1536,7 +1519,7 @@ func Test_readDeployment(t *testing.T) { NodeTypeMl: ec.String("false"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go index 575ec4a05..c36a98622 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go @@ -46,7 +46,9 @@ func Test_updateResourceToModel(t *testing.T) { ) defaultElasticsearch := 
&elasticsearchv2.Elasticsearch{ - HotTier: defaultHotTier, + Topology: elasticsearchv2.ElasticsearchTopologies{ + *defaultHotTier, + }, } var ioOptimizedTpl = func() io.ReadCloser { @@ -96,9 +98,9 @@ func Test_updateResourceToModel(t *testing.T) { UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), }, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("2g"), NodeTypeData: ec.String("true"), @@ -108,7 +110,7 @@ func Test_updateResourceToModel(t *testing.T) { ZoneCount: 1, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -463,13 +465,13 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.7.0", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("4g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ RefId: ec.String("main-kibana"), @@ -740,8 +742,10 @@ func Test_updateResourceToModel(t *testing.T) { Region: "us-east-1", Version: "7.9.2", Elasticsearch: &elasticsearchv2.Elasticsearch{ - RefId: ec.String("main-elasticsearch"), - HotTier: defaultHotTier, + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + *defaultHotTier, + }, }, Kibana: &kibanav2.Kibana{ RefId: ec.String("main-kibana"), @@ -765,9 +769,9 @@ func Test_updateResourceToModel(t *testing.T) { UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), 
UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), }, - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("2g"), NodeTypeData: ec.String("true"), @@ -777,7 +781,7 @@ func Test_updateResourceToModel(t *testing.T) { ZoneCount: 1, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -902,8 +906,8 @@ func Test_updateResourceToModel(t *testing.T) { Region: "us-east-1", Version: "7.9.2", Elasticsearch: &elasticsearchv2.Elasticsearch{ - RefId: ec.String("main-elasticsearch"), - HotTier: defaultHotTier, + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{*defaultHotTier}, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -918,20 +922,18 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.9.2", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - CoordinatingTier: elasticsearchv2.CreateTierForTest( - "coordinating", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "coordinating", Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -1027,8 +1029,8 @@ func Test_updateResourceToModel(t *testing.T) { Region: "us-east-1", Version: "7.9.2", Elasticsearch: &elasticsearchv2.Elasticsearch{ - RefId: 
ec.String("main-elasticsearch"), - HotTier: defaultHotTier, + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{*defaultHotTier}, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -1051,20 +1053,18 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.9.2", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - CoordinatingTier: elasticsearchv2.CreateTierForTest( - "coordinating", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "coordinating", Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -1208,9 +1208,9 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.11.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("16g"), NodeTypeData: ec.String("true"), NodeTypeIngest: ec.String("true"), @@ -1218,7 +1218,7 @@ func Test_updateResourceToModel(t *testing.T) { NodeTypeMl: ec.String("false"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, state: &Deployment{ @@ -1229,9 +1229,9 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.9.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: 
elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("16g"), NodeTypeData: ec.String("true"), NodeTypeIngest: ec.String("true"), @@ -1239,7 +1239,7 @@ func Test_updateResourceToModel(t *testing.T) { NodeTypeMl: ec.String("false"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, client: api.NewMock(mock.New200Response(ioOptimizedTpl())), @@ -1311,16 +1311,16 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.11.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("16g"), NodeTypeData: ec.String("true"), NodeTypeIngest: ec.String("true"), NodeTypeMaster: ec.String("true"), NodeTypeMl: ec.String("false"), }, - ), + }, }, }, state: &Deployment{ @@ -1331,16 +1331,16 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.10.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("16g"), NodeTypeData: ec.String("true"), NodeTypeIngest: ec.String("true"), NodeTypeMaster: ec.String("true"), NodeTypeMl: ec.String("false"), }, - ), + }, }, }, client: api.NewMock(mock.New200Response(ioOptimizedTpl())), @@ -1414,9 +1414,9 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.10.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("32g"), NodeTypeData: ec.String("true"), NodeTypeIngest: 
ec.String("true"), @@ -1424,7 +1424,7 @@ func Test_updateResourceToModel(t *testing.T) { NodeTypeMl: ec.String("false"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, state: &Deployment{ @@ -1435,9 +1435,9 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.10.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("16g"), NodeTypeData: ec.String("true"), NodeTypeIngest: ec.String("true"), @@ -1445,7 +1445,7 @@ func Test_updateResourceToModel(t *testing.T) { NodeTypeMl: ec.String("false"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, client: api.NewMock(mock.New200Response(ioOptimizedTpl())), @@ -1519,9 +1519,9 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.10.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("16g"), NodeTypeData: ec.String("true"), NodeTypeIngest: ec.String("true"), @@ -1529,14 +1529,12 @@ func Test_updateResourceToModel(t *testing.T) { NodeTypeMl: ec.String("false"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - WarmTier: elasticsearchv2.CreateTierForTest( - "warm", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "warm", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, state: &Deployment{ @@ -1547,9 +1545,9 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.10.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: 
elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("16g"), NodeTypeData: ec.String("true"), NodeTypeIngest: ec.String("true"), @@ -1557,7 +1555,7 @@ func Test_updateResourceToModel(t *testing.T) { NodeTypeMl: ec.String("false"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, client: api.NewMock(mock.New200Response(ioOptimizedTpl())), @@ -1660,20 +1658,18 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Autoscale: ec.Bool(true), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - WarmTier: elasticsearchv2.CreateTierForTest( - "warm", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "warm", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, state: &Deployment{ @@ -1685,13 +1681,13 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Autoscale: ec.Bool(true), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, client: api.NewMock(mock.New200Response(ioOptimizedTpl())), @@ -1793,13 +1789,13 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.10.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - 
elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Tags: map[string]string{ "aaa": "bbb", @@ -1815,13 +1811,13 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.10.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, client: api.NewMock(mock.New200Response(ioOptimizedTpl())), @@ -1897,13 +1893,13 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.10.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, SnapshotSource: &elasticsearchv2.ElasticsearchSnapshotSource{ SourceElasticsearchClusterId: "8c63b87af9e24ea49b8a4bfe550e5fe9", }, @@ -1917,13 +1913,13 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.10.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, client: api.NewMock(mock.New200Response(ioOptimizedTpl())), @@ -2004,13 +2000,13 @@ func Test_updateResourceToModel(t *testing.T) { Version: 
"7.10.1", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, }, client: api.NewMock(mock.New200Response(ioOptimizedTpl())), @@ -2095,20 +2091,18 @@ func Test_updateResourceToModel(t *testing.T) { Version: "7.9.2", Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), - HotTier: elasticsearchv2.CreateTierForTest( - "hot_content", - elasticsearchv2.ElasticsearchTopology{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "hot_content", Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), - CoordinatingTier: elasticsearchv2.CreateTierForTest( - "coordinating", - elasticsearchv2.ElasticsearchTopology{ + { + Id: "coordinating", Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - ), + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go index 2aa98d95c..27941da48 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go @@ -30,27 +30,21 @@ import ( ) type ElasticsearchTF struct { - Autoscale types.Bool `tfsdk:"autoscale"` - RefId types.String `tfsdk:"ref_id"` - ResourceId types.String `tfsdk:"resource_id"` - Region types.String `tfsdk:"region"` - CloudID types.String `tfsdk:"cloud_id"` - HttpEndpoint types.String `tfsdk:"http_endpoint"` - HttpsEndpoint types.String `tfsdk:"https_endpoint"` - HotContentTier 
types.Object `tfsdk:"hot"` - CoordinatingTier types.Object `tfsdk:"coordinating"` - MasterTier types.Object `tfsdk:"master"` - WarmTier types.Object `tfsdk:"warm"` - ColdTier types.Object `tfsdk:"cold"` - FrozenTier types.Object `tfsdk:"frozen"` - MlTier types.Object `tfsdk:"ml"` - Config types.Object `tfsdk:"config"` - RemoteCluster types.Set `tfsdk:"remote_cluster"` - SnapshotSource types.Object `tfsdk:"snapshot_source"` - Extension types.Set `tfsdk:"extension"` - TrustAccount types.Set `tfsdk:"trust_account"` - TrustExternal types.Set `tfsdk:"trust_external"` - Strategy types.String `tfsdk:"strategy"` + Autoscale types.Bool `tfsdk:"autoscale"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + CloudID types.String `tfsdk:"cloud_id"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + Topology types.Set `tfsdk:"topology"` + Config types.Object `tfsdk:"config"` + RemoteCluster types.Set `tfsdk:"remote_cluster"` + SnapshotSource types.Object `tfsdk:"snapshot_source"` + Extension types.Set `tfsdk:"extension"` + TrustAccount types.Set `tfsdk:"trust_account"` + TrustExternal types.Set `tfsdk:"trust_external"` + Strategy types.String `tfsdk:"strategy"` } func ElasticsearchPayload(ctx context.Context, esObj types.Object, template *models.DeploymentTemplateInfoV2, dtID, version string, useNodeRoles bool, skipTopologies bool) (*models.ElasticsearchPayload, diag.Diagnostics) { @@ -125,31 +119,13 @@ func (es *ElasticsearchTF) payload(ctx context.Context, res *models.Elasticsearc return res, diags } -func (es *ElasticsearchTF) topologyObjects() map[string]types.Object { - return map[string]types.Object{ - "hot_content": es.HotContentTier, - "warm": es.WarmTier, - "cold": es.ColdTier, - "frozen": es.FrozenTier, - "ml": es.MlTier, - "master": es.MasterTier, - "coordinating": es.CoordinatingTier, - } -} - -func (es *ElasticsearchTF) topologies(ctx 
context.Context) (map[string]*ElasticsearchTopologyTF, diag.Diagnostics) { - var diagnostics diag.Diagnostics - - tierObjects := es.topologyObjects() - res := make(map[string]*ElasticsearchTopologyTF, len(tierObjects)) - - for topologyId, topologyObject := range tierObjects { - tier, diags := objectToTopology(ctx, topologyObject) - diagnostics.Append(diags...) - res[topologyId] = tier +func (es *ElasticsearchTF) topologies(ctx context.Context) ([]*ElasticsearchTopologyTF, diag.Diagnostics) { + var topologies []*ElasticsearchTopologyTF + if diags := es.Topology.ElementsAs(ctx, &topologies, true); diags.HasError() { + return nil, diags } - return res, diagnostics + return topologies, nil } func (es *ElasticsearchTF) topologiesPayload(ctx context.Context, topologyModels []*models.ElasticsearchClusterTopologyElement) diag.Diagnostics { @@ -159,9 +135,9 @@ func (es *ElasticsearchTF) topologiesPayload(ctx context.Context, topologyModels return diags } - for tierId, tier := range tiers { + for _, tier := range tiers { if tier != nil { - diags.Append(tier.payload(ctx, tierId, topologyModels)...) + diags.Append(tier.payload(ctx, tier.Id.Value, topologyModels)...) 
} } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go index c2d038cec..fc81fec36 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go @@ -111,10 +111,12 @@ func Test_writeElasticsearch(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), - HotTier: &ElasticsearchTopology{ - id: "hot_content", - Size: ec.String("2g"), - ZoneCount: 1, + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, }, }, template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), @@ -177,10 +179,12 @@ func Test_writeElasticsearch(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), - HotTier: &ElasticsearchTopology{ - id: "hot_content", - Size: ec.String("2g"), - ZoneCount: 1, + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, }, }, template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), @@ -246,11 +250,13 @@ func Test_writeElasticsearch(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), - HotTier: &ElasticsearchTopology{ - id: "hot_content", - Size: ec.String("2g"), - ZoneCount: 1, - NodeRoles: []string{"a", "b", "c"}, + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + NodeRoles: []string{"a", "b", "c"}, + }, }, }, template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), @@ -372,15 +378,17 @@ func Test_writeElasticsearch(t 
*testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), - HotTier: &ElasticsearchTopology{ - id: "hot_content", - Size: ec.String("2g"), - ZoneCount: 1, - }, - WarmTier: &ElasticsearchTopology{ - id: "warm", - Size: ec.String("2g"), - ZoneCount: 1, + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, + { + Id: "warm", + Size: ec.String("2g"), + ZoneCount: 1, + }, }, }, template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), @@ -477,15 +485,17 @@ func Test_writeElasticsearch(t *testing.T) { Config: &ElasticsearchConfig{ UserSettingsYaml: ec.String("somesetting: true"), }, - HotTier: &ElasticsearchTopology{ - id: "hot_content", - Size: ec.String("2g"), - ZoneCount: 1, - }, - WarmTier: &ElasticsearchTopology{ - id: "warm", - Size: ec.String("2g"), - ZoneCount: 1, + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, + { + Id: "warm", + Size: ec.String("2g"), + ZoneCount: 1, + }, }, }, template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), @@ -672,16 +682,18 @@ func Test_writeElasticsearch(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), - HotTier: &ElasticsearchTopology{ - id: "hot_content", - NodeTypeData: ec.String("false"), - NodeTypeMaster: ec.String("false"), - NodeTypeIngest: ec.String("false"), - NodeTypeMl: ec.String("true"), - }, - WarmTier: &ElasticsearchTopology{ - id: "warm", - NodeTypeMaster: ec.String("true"), + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + NodeTypeData: ec.String("false"), + NodeTypeMaster: ec.String("false"), + NodeTypeIngest: ec.String("false"), + NodeTypeMl: ec.String("true"), + }, + { + Id: "warm", + NodeTypeMaster: ec.String("true"), + }, }, }, template: 
testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), @@ -776,20 +788,22 @@ func Test_writeElasticsearch(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), - HotTier: &ElasticsearchTopology{ - id: "hot_content", - NodeTypeData: ec.String("false"), - NodeTypeMaster: ec.String("false"), - NodeTypeIngest: ec.String("false"), - NodeTypeMl: ec.String("true"), - }, - WarmTier: &ElasticsearchTopology{ - id: "warm", - NodeTypeMaster: ec.String("true"), - }, - ColdTier: &ElasticsearchTopology{ - id: "cold", - Size: ec.String("2g"), + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + NodeTypeData: ec.String("false"), + NodeTypeMaster: ec.String("false"), + NodeTypeIngest: ec.String("false"), + NodeTypeMl: ec.String("true"), + }, + { + Id: "warm", + NodeTypeMaster: ec.String("true"), + }, + { + Id: "cold", + Size: ec.String("2g"), + }, }, }, template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), @@ -914,15 +928,17 @@ func Test_writeElasticsearch(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), - HotTier: &ElasticsearchTopology{ - id: "hot_content", - }, - WarmTier: &ElasticsearchTopology{ - id: "warm", - }, - ColdTier: &ElasticsearchTopology{ - id: "cold", - Size: ec.String("2g"), + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + }, + { + Id: "warm", + }, + { + Id: "cold", + Size: ec.String("2g"), + }, }, }, template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), @@ -1047,31 +1063,33 @@ func Test_writeElasticsearch(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), - HotTier: &ElasticsearchTopology{ - id: "hot_content", - Autoscaling: &ElasticsearchTopologyAutoscaling{ - MaxSize: 
ec.String("58g"), + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("58g"), + }, }, - }, - WarmTier: &ElasticsearchTopology{ - id: "warm", - Autoscaling: &ElasticsearchTopologyAutoscaling{ - MaxSize: ec.String("29g"), + { + Id: "warm", + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("29g"), + }, }, - }, - ColdTier: &ElasticsearchTopology{ - id: "cold", - Size: ec.String("2g"), - Autoscaling: &ElasticsearchTopologyAutoscaling{ - MaxSize: ec.String("29g"), + { + Id: "cold", + Size: ec.String("2g"), + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("29g"), + }, }, - }, - MlTier: &ElasticsearchTopology{ - id: "ml", - Size: ec.String("1g"), - Autoscaling: &ElasticsearchTopologyAutoscaling{ - MaxSize: ec.String("29g"), - MinSize: ec.String("1g"), + { + Id: "ml", + Size: ec.String("1g"), + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("29g"), + MinSize: ec.String("1g"), + }, }, }, }, @@ -1224,18 +1242,20 @@ func Test_writeElasticsearch(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), - HotTier: &ElasticsearchTopology{ - id: "hot_content", - Autoscaling: &ElasticsearchTopologyAutoscaling{ - MaxSize: ec.String("450g"), - MinSize: ec.String("2g"), + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("450g"), + MinSize: ec.String("2g"), + }, }, - }, - MasterTier: &ElasticsearchTopology{ - id: "master", - Autoscaling: &ElasticsearchTopologyAutoscaling{ - MaxSize: ec.String("250g"), - MinSize: ec.String("1g"), + { + Id: "master", + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("250g"), + MinSize: ec.String("1g"), + }, }, }, }, @@ -1336,28 +1356,30 @@ func Test_writeElasticsearch(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: 
ec.String(mock.ValidClusterID), Region: ec.String("some-region"), - HotTier: &ElasticsearchTopology{ - id: "hot_content", - Autoscaling: &ElasticsearchTopologyAutoscaling{ - MaxSize: ec.String("450g"), - MaxSizeResource: ec.String("storage"), + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("450g"), + MaxSizeResource: ec.String("storage"), + }, }, - }, - WarmTier: &ElasticsearchTopology{ - id: "warm", - Autoscaling: &ElasticsearchTopologyAutoscaling{ - MaxSize: ec.String("870g"), - MaxSizeResource: ec.String("storage"), + { + Id: "warm", + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("870g"), + MaxSizeResource: ec.String("storage"), + }, }, - }, - ColdTier: &ElasticsearchTopology{ - id: "cold", - Size: ec.String("4g"), - Autoscaling: &ElasticsearchTopologyAutoscaling{ - MaxSize: ec.String("1740g"), - MaxSizeResource: ec.String("storage"), - MinSizeResource: ec.String("storage"), - MinSize: ec.String("4g"), + { + Id: "cold", + Size: ec.String("4g"), + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("1740g"), + MaxSizeResource: ec.String("storage"), + MinSizeResource: ec.String("storage"), + MinSize: ec.String("4g"), + }, }, }, }, @@ -1493,10 +1515,12 @@ func Test_writeElasticsearch(t *testing.T) { UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), Plugins: []string{"plugin"}, }, - HotTier: &ElasticsearchTopology{ - id: "hot_content", - Size: ec.String("2g"), - ZoneCount: 1, + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, }, }, template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), @@ -1570,10 +1594,12 @@ func Test_writeElasticsearch(t *testing.T) { SnapshotName: "__latest_success__", SourceElasticsearchClusterId: mock.ValidClusterID, }, - HotTier: &ElasticsearchTopology{ - id: "hot_content", - Size: ec.String("2g"), - 
ZoneCount: 1, + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, }, }, template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), @@ -1640,10 +1666,12 @@ func Test_writeElasticsearch(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), - HotTier: &ElasticsearchTopology{ - id: "hot_content", - Size: ec.String("2g"), - ZoneCount: 1, + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, }, Strategy: ec.String("autodetect"), }, @@ -1712,10 +1740,12 @@ func Test_writeElasticsearch(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), - HotTier: &ElasticsearchTopology{ - id: "hot_content", - Size: ec.String("2g"), - ZoneCount: 1, + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, }, Strategy: ec.String("grow_and_shrink"), }, @@ -1784,10 +1814,12 @@ func Test_writeElasticsearch(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), - HotTier: &ElasticsearchTopology{ - id: "hot_content", - Size: ec.String("2g"), - ZoneCount: 1, + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, }, Strategy: ec.String("rolling_grow_and_shrink"), }, @@ -1856,10 +1888,12 @@ func Test_writeElasticsearch(t *testing.T) { RefId: ec.String("main-elasticsearch"), ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), - HotTier: &ElasticsearchTopology{ - id: "hot_content", - Size: ec.String("2g"), - ZoneCount: 1, + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + Size: ec.String("2g"), + ZoneCount: 1, + }, }, Strategy: ec.String("rolling_all"), }, diff --git 
a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go index 7ae4f8b14..37b8b2231 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go @@ -25,27 +25,21 @@ import ( ) type Elasticsearch struct { - Autoscale *bool `tfsdk:"autoscale"` - RefId *string `tfsdk:"ref_id"` - ResourceId *string `tfsdk:"resource_id"` - Region *string `tfsdk:"region"` - CloudID *string `tfsdk:"cloud_id"` - HttpEndpoint *string `tfsdk:"http_endpoint"` - HttpsEndpoint *string `tfsdk:"https_endpoint"` - HotTier *ElasticsearchTopology `tfsdk:"hot"` - CoordinatingTier *ElasticsearchTopology `tfsdk:"coordinating"` - MasterTier *ElasticsearchTopology `tfsdk:"master"` - WarmTier *ElasticsearchTopology `tfsdk:"warm"` - ColdTier *ElasticsearchTopology `tfsdk:"cold"` - FrozenTier *ElasticsearchTopology `tfsdk:"frozen"` - MlTier *ElasticsearchTopology `tfsdk:"ml"` - Config *ElasticsearchConfig `tfsdk:"config"` - RemoteCluster ElasticsearchRemoteClusters `tfsdk:"remote_cluster"` - SnapshotSource *ElasticsearchSnapshotSource `tfsdk:"snapshot_source"` - Extension ElasticsearchExtensions `tfsdk:"extension"` - TrustAccount ElasticsearchTrustAccounts `tfsdk:"trust_account"` - TrustExternal ElasticsearchTrustExternals `tfsdk:"trust_external"` - Strategy *string `tfsdk:"strategy"` + Autoscale *bool `tfsdk:"autoscale"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + CloudID *string `tfsdk:"cloud_id"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + Topology ElasticsearchTopologies `tfsdk:"topology"` + Config *ElasticsearchConfig `tfsdk:"config"` + RemoteCluster ElasticsearchRemoteClusters `tfsdk:"remote_cluster"` + SnapshotSource *ElasticsearchSnapshotSource `tfsdk:"snapshot_source"` + Extension 
ElasticsearchExtensions `tfsdk:"extension"` + TrustAccount ElasticsearchTrustAccounts `tfsdk:"trust_account"` + TrustExternal ElasticsearchTrustExternals `tfsdk:"trust_external"` + Strategy *string `tfsdk:"strategy"` } func ReadElasticsearches(in []*models.ElasticsearchResourceInfo, remotes *models.RemoteResources) (*Elasticsearch, error) { @@ -89,7 +83,7 @@ func readElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.Rem if err != nil { return nil, err } - es.setTopology(topologies) + es.Topology = topologies if plan.AutoscalingEnabled != nil { es.Autoscale = plan.AutoscalingEnabled @@ -133,30 +127,6 @@ func readElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.Rem return &es, nil } -func (es *Elasticsearch) setTopology(topologies ElasticsearchTopologies) { - set := topologies.AsSet() - - for id, topology := range set { - topology := topology - switch id { - case "hot_content": - es.HotTier = &topology - case "coordinating": - es.CoordinatingTier = &topology - case "master": - es.MasterTier = &topology - case "warm": - es.WarmTier = &topology - case "cold": - es.ColdTier = &topology - case "frozen": - es.FrozenTier = &topology - case "ml": - es.MlTier = &topology - } - } -} - // IsElasticsearchStopped returns true if the resource is stopped. 
func IsElasticsearchStopped(res *models.ElasticsearchResourceInfo) bool { return res == nil || res.Info == nil || res.Info.Status == nil || diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go index 9900db03e..dc63edf04 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go @@ -157,17 +157,19 @@ func Test_readElasticsearch(t *testing.T) { HttpEndpoint: ec.String("http://somecluster.cloud.elastic.co:9200"), HttpsEndpoint: ec.String("https://somecluster.cloud.elastic.co:9243"), Config: &ElasticsearchConfig{}, - HotTier: &ElasticsearchTopology{ - id: "hot_content", - InstanceConfigurationId: ec.String("aws.data.highio.i3"), - Size: ec.String("2g"), - SizeResource: ec.String("memory"), - NodeTypeData: ec.String("true"), - NodeTypeIngest: ec.String("true"), - NodeTypeMaster: ec.String("true"), - NodeTypeMl: ec.String("false"), - ZoneCount: 1, - Autoscaling: &ElasticsearchTopologyAutoscaling{}, + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &ElasticsearchTopologyAutoscaling{}, + }, }, }, }, @@ -236,17 +238,19 @@ func Test_readElasticsearch(t *testing.T) { UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), }, - HotTier: &ElasticsearchTopology{ - id: "hot_content", - InstanceConfigurationId: ec.String("aws.data.highio.i3"), - Size: ec.String("2g"), - SizeResource: ec.String("memory"), - NodeTypeData: ec.String("true"), - NodeTypeIngest: ec.String("true"), - 
NodeTypeMaster: ec.String("true"), - NodeTypeMl: ec.String("false"), - ZoneCount: 1, - Autoscaling: &ElasticsearchTopologyAutoscaling{}, + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &ElasticsearchTopologyAutoscaling{}, + }, }, }, }, @@ -303,7 +307,7 @@ func Test_readElasticsearchTopology(t *testing.T) { }}, want: ElasticsearchTopologies{ { - id: "hot_content", + Id: "hot_content", InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("4g"), SizeResource: ec.String("memory"), @@ -314,7 +318,7 @@ func Test_readElasticsearchTopology(t *testing.T) { Autoscaling: &ElasticsearchTopologyAutoscaling{}, }, { - id: "coordinating", + Id: "coordinating", InstanceConfigurationId: ec.String("aws.coordinating.m5"), Size: ec.String("0g"), SizeResource: ec.String("memory"), @@ -359,7 +363,7 @@ func Test_readElasticsearchTopology(t *testing.T) { }}, want: ElasticsearchTopologies{ { - id: "hot_content", + Id: "hot_content", InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("4g"), SizeResource: ec.String("memory"), @@ -370,7 +374,7 @@ func Test_readElasticsearchTopology(t *testing.T) { Autoscaling: &ElasticsearchTopologyAutoscaling{}, }, { - id: "ml", + Id: "ml", InstanceConfigurationId: ec.String("aws.ml.m5"), Size: ec.String("0g"), SizeResource: ec.String("memory"), diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_test_utils.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_test_utils.go index c007c4823..070fb420c 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_test_utils.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_test_utils.go 
@@ -21,7 +21,7 @@ import "github.com/elastic/cloud-sdk-go/pkg/models" func CreateTierForTest(tierId string, tier ElasticsearchTopology) *ElasticsearchTopology { res := tier - res.id = tierId + res.Id = tierId return &res } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go index 688e9fab4..10622e807 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go @@ -38,6 +38,7 @@ import ( ) type ElasticsearchTopologyTF struct { + Id types.String `tfsdk:"id"` InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` Size types.String `tfsdk:"size"` SizeResource types.String `tfsdk:"size_resource"` @@ -51,7 +52,7 @@ type ElasticsearchTopologyTF struct { } type ElasticsearchTopology struct { - id string + Id string `tfsdk:"id"` InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` Size *string `tfsdk:"size"` SizeResource *string `tfsdk:"size_resource"` @@ -66,6 +67,18 @@ type ElasticsearchTopology struct { type ElasticsearchTopologyAutoscaling v1.ElasticsearchTopologyAutoscaling +type ElasticsearchTopologiesTF []*ElasticsearchTopologyTF + +func (tops ElasticsearchTopologiesTF) AsSet() map[string]*ElasticsearchTopologyTF { + set := make(map[string]*ElasticsearchTopologyTF, len(tops)) + + for _, top := range tops { + set[top.Id.Value] = top + } + + return set +} + func (topology ElasticsearchTopologyTF) payload(ctx context.Context, topologyID string, planTopologies []*models.ElasticsearchClusterTopologyElement) diag.Diagnostics { var diags diag.Diagnostics @@ -129,7 +142,7 @@ func readElasticsearchTopologies(in *models.ElasticsearchClusterPlan) (Elasticse func readElasticsearchTopology(model *models.ElasticsearchClusterTopologyElement) (*ElasticsearchTopology, error) { var topology ElasticsearchTopology - topology.id = 
model.ID + topology.Id = model.ID if model.InstanceConfigurationID != "" { topology.InstanceConfigurationId = &model.InstanceConfigurationID @@ -260,16 +273,6 @@ func objectToTopology(ctx context.Context, obj types.Object) (*ElasticsearchTopo type ElasticsearchTopologies []ElasticsearchTopology -func (tops ElasticsearchTopologies) AsSet() map[string]ElasticsearchTopology { - set := make(map[string]ElasticsearchTopology, len(tops)) - - for _, top := range tops { - set[top.id] = top - } - - return set -} - func matchEsTopologyID(id string, topologies []*models.ElasticsearchClusterTopologyElement) (*models.ElasticsearchClusterTopologyElement, error) { for _, t := range topologies { if t.ID == id { diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier_test.go index c727180ce..2224edb26 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier_test.go @@ -17,17 +17,7 @@ package v2_test -import ( - "context" - "testing" - - deploymentv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" - v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" - "github.com/hashicorp/terraform-plugin-framework/tfsdk" - "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/stretchr/testify/assert" -) - +/* func Test_nodeRolesPlanModifier(t *testing.T) { type args struct { attributeState []string @@ -214,3 +204,4 @@ func Test_nodeRolesPlanModifier(t *testing.T) { }) } } +*/ diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go index 932fd5dbe..615a51400 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go +++ 
b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go @@ -129,12 +129,14 @@ func Test_UseNodeRoles(t *testing.T) { stateVersion: "7.11.1", planVersion: "7.12.0", elasticsearch: Elasticsearch{ - HotTier: &ElasticsearchTopology{ - id: "hot_content", - NodeTypeData: ec.String("true"), - NodeTypeMaster: ec.String("true"), - NodeTypeIngest: ec.String("true"), - NodeTypeMl: ec.String("false"), + Topology: ElasticsearchTopologies{ + { + Id: "hot_content", + NodeTypeData: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMl: ec.String("false"), + }, }, }, }, diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier_test.go index dfd14863c..a73f5fc8d 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier_test.go @@ -17,17 +17,7 @@ package v2_test -import ( - "context" - "testing" - - deploymentv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" - v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" - "github.com/hashicorp/terraform-plugin-framework/tfsdk" - "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/stretchr/testify/assert" -) - +/* func Test_nodeTypesPlanModifier(t *testing.T) { type args struct { attributeState types.String @@ -184,3 +174,4 @@ func Test_nodeTypesPlanModifier(t *testing.T) { }) } } +*/ diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go index 4cdea11d2..b2ea3665e 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -96,13 +96,7 @@ func ElasticsearchSchema() tfsdk.Attribute { 
Computed: true, }, - "hot": ElasticsearchTopologySchema("'hot' topology element", true, "hot"), - "coordinating": ElasticsearchTopologySchema("'coordinating' topology element", false, "coordinating"), - "master": ElasticsearchTopologySchema("'master' topology element", false, "master"), - "warm": ElasticsearchTopologySchema("'warm' topology element", false, "warm"), - "cold": ElasticsearchTopologySchema("'cold' topology element", false, "cold"), - "frozen": ElasticsearchTopologySchema("'frozen' topology element", false, "frozen"), - "ml": ElasticsearchTopologySchema("'ml' topology element", false, "ml"), + "topology": ElasticsearchTopologySchema(), "trust_account": ElasticsearchTrustAccountSchema(), @@ -168,7 +162,7 @@ func ElasticsearchConfigSchema() tfsdk.Attribute { } } -func ElasticsearchTopologyAutoscalingSchema(topologyAttributeName string) tfsdk.Attribute { +func ElasticsearchTopologyAutoscalingSchema() tfsdk.Attribute { return tfsdk.Attribute{ Description: "Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources.", Required: true, @@ -178,44 +172,29 @@ func ElasticsearchTopologyAutoscalingSchema(topologyAttributeName string) tfsdk. 
Type: types.StringType, Optional: true, Computed: true, - PlanModifiers: []tfsdk.AttributePlanModifier{ - UseTopologyStateForUnknown(topologyAttributeName), - }, }, "max_size": { Description: "Maximum size value for the maximum autoscaling setting.", Type: types.StringType, Optional: true, Computed: true, - PlanModifiers: []tfsdk.AttributePlanModifier{ - UseTopologyStateForUnknown(topologyAttributeName), - }, }, "min_size_resource": { Description: "Minimum resource type for the minimum autoscaling setting.", Type: types.StringType, Optional: true, Computed: true, - PlanModifiers: []tfsdk.AttributePlanModifier{ - UseTopologyStateForUnknown(topologyAttributeName), - }, }, "min_size": { Description: "Minimum size value for the minimum autoscaling setting.", Type: types.StringType, Optional: true, Computed: true, - PlanModifiers: []tfsdk.AttributePlanModifier{ - UseTopologyStateForUnknown(topologyAttributeName), - }, }, "policy_override_json": { Type: types.StringType, Description: "Computed policy overrides set directly via the API or other clients.", Computed: true, - PlanModifiers: []tfsdk.AttributePlanModifier{ - UseTopologyStateForUnknown(topologyAttributeName), - }, }, }), } @@ -375,85 +354,63 @@ func ElasticsearchTrustExternalSchema() tfsdk.Attribute { } } -func ElasticsearchTopologySchema(description string, required bool, topologyAttributeName string) tfsdk.Attribute { +func ElasticsearchTopologySchema() tfsdk.Attribute { return tfsdk.Attribute{ - Optional: !required, - // it should be Computed but Computed triggers TF weird behaviour that leads to unempty plan for zero change config - // Computed: true, - Required: required, - Description: description, - Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + Optional: true, + Computed: true, + Description: `Optional topology element which must be set once but can be set multiple times to compose complex topologies`, + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "id": { 
+ Type: types.StringType, + Description: `Required topology ID from the deployment template`, + Required: true, + }, "instance_configuration_id": { Type: types.StringType, Description: `Computed Instance Configuration ID of the topology element`, Computed: true, - PlanModifiers: tfsdk.AttributePlanModifiers{ - UseTopologyStateForUnknown(topologyAttributeName), - }, }, "size": { Type: types.StringType, Description: `Amount of "size_resource" per node in the "g" notation`, Computed: true, Optional: true, - PlanModifiers: tfsdk.AttributePlanModifiers{ - UseTopologyStateForUnknown(topologyAttributeName), - }, }, "size_resource": { Type: types.StringType, Description: `Size type, defaults to "memory".`, Optional: true, Computed: true, - PlanModifiers: []tfsdk.AttributePlanModifier{ - planmodifier.DefaultValue(types.String{Value: "memory"}), - }, }, "zone_count": { Type: types.Int64Type, Description: `Number of zones that the Elasticsearch cluster will span. This is used to set HA`, Computed: true, Optional: true, - PlanModifiers: tfsdk.AttributePlanModifiers{ - resource.UseStateForUnknown(), - UseTopologyStateForUnknown(topologyAttributeName), - }, }, "node_type_data": { Type: types.StringType, Description: `The node type for the Elasticsearch Topology element (data node)`, Computed: true, Optional: true, - PlanModifiers: tfsdk.AttributePlanModifiers{ - UseNodeTypesDefault(), - }, }, "node_type_master": { Type: types.StringType, Description: `The node type for the Elasticsearch Topology element (master node)`, Computed: true, Optional: true, - PlanModifiers: tfsdk.AttributePlanModifiers{ - UseNodeTypesDefault(), - }, }, "node_type_ingest": { Type: types.StringType, Description: `The node type for the Elasticsearch Topology element (ingest node)`, Computed: true, Optional: true, - PlanModifiers: tfsdk.AttributePlanModifiers{ - UseNodeTypesDefault(), - }, }, "node_type_ml": { Type: types.StringType, Description: `The node type for the Elasticsearch Topology element 
(machine learning node)`, Computed: true, Optional: true, - PlanModifiers: tfsdk.AttributePlanModifiers{ - UseNodeTypesDefault(), - }, }, "node_roles": { Type: types.SetType{ @@ -461,11 +418,8 @@ func ElasticsearchTopologySchema(description string, required bool, topologyAttr }, Description: `The computed list of node roles for the current topology element`, Computed: true, - PlanModifiers: tfsdk.AttributePlanModifiers{ - UseNodeRolesDefault(), - }, }, - "autoscaling": ElasticsearchTopologyAutoscalingSchema(topologyAttributeName), + "autoscaling": ElasticsearchTopologyAutoscalingSchema(), }), } } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier_test.go index b31d6db79..3cfe129cb 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier_test.go @@ -17,6 +17,7 @@ package v2_test +/* import ( "context" "testing" @@ -73,19 +74,23 @@ func Test_topologyPlanModifier(t *testing.T) { deploymentState: deploymentv2.Deployment{ DeploymentTemplateId: "aws-io-optimized-v2", Elasticsearch: &v2.Elasticsearch{ - HotTier: v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ - Autoscaling: &v2.ElasticsearchTopologyAutoscaling{ - MinSize: ec.String("1g"), - }, - }), + Topology: v2.ElasticsearchTopologies{ + *v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{ + MinSize: ec.String("1g"), + }, + }), + }, }, }, deploymentPlan: deploymentv2.Deployment{ DeploymentTemplateId: "aws-storage-optimized-v3", Elasticsearch: &v2.Elasticsearch{ - HotTier: v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ - Autoscaling: &v2.ElasticsearchTopologyAutoscaling{}, - }), + Topology: v2.ElasticsearchTopologies{ + *v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: 
&v2.ElasticsearchTopologyAutoscaling{}, + }), + }, }, }, }, @@ -100,17 +105,21 @@ func Test_topologyPlanModifier(t *testing.T) { deploymentState: deploymentv2.Deployment{ DeploymentTemplateId: "aws-io-optimized-v2", Elasticsearch: &v2.Elasticsearch{ - HotTier: v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ - Autoscaling: &v2.ElasticsearchTopologyAutoscaling{}, - }), + Topology: v2.ElasticsearchTopologies{ + *v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{}, + }), + }, }, }, deploymentPlan: deploymentv2.Deployment{ DeploymentTemplateId: "aws-io-optimized-v2", Elasticsearch: &v2.Elasticsearch{ - HotTier: v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ - Autoscaling: &v2.ElasticsearchTopologyAutoscaling{}, - }), + Topology: v2.ElasticsearchTopologies{ + *v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{}, + }), + }, }, }, }, @@ -125,19 +134,23 @@ func Test_topologyPlanModifier(t *testing.T) { deploymentState: deploymentv2.Deployment{ DeploymentTemplateId: "aws-io-optimized-v2", Elasticsearch: &v2.Elasticsearch{ - HotTier: v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ - Autoscaling: &v2.ElasticsearchTopologyAutoscaling{ - MaxSize: ec.String("1g"), - }, - }), + Topology: v2.ElasticsearchTopologies{ + *v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("1g"), + }, + }), + }, }, }, deploymentPlan: deploymentv2.Deployment{ DeploymentTemplateId: "aws-io-optimized-v2", Elasticsearch: &v2.Elasticsearch{ - HotTier: v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ - Autoscaling: &v2.ElasticsearchTopologyAutoscaling{}, - }), + Topology: v2.ElasticsearchTopologies{ + *v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{}, + }), + }, }, }, }, @@ 
-199,3 +212,4 @@ func unknownValueFromAttrType(t *testing.T, attributeType attr.Type) attr.Value assert.Nil(t, err) return val } +*/ diff --git a/ec/ecresource/deploymentresource/read.go b/ec/ecresource/deploymentresource/read.go index 77cddf660..33959c533 100644 --- a/ec/ecresource/deploymentresource/read.go +++ b/ec/ecresource/deploymentresource/read.go @@ -146,7 +146,7 @@ func (r *Resource) read(ctx context.Context, id string, state *deploymentv2.Depl deployment.ProcessSelfInObservability() - deployment.NullifyUnusedEsTopologies(ctx, elasticsearchPlan) + // deployment.NullifyUnusedEsTopologies(ctx, elasticsearchPlan) // ReadDeployment returns empty config struct if there is no config, so we have to nullify it if plan doesn't contain it // we use state for plan in Read and there is no state during import so we need to check elasticsearchPlan against nil From ef276f8497636ab09b6dbacc7829dc375a746b7d Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 18 Jan 2023 14:08:01 +0100 Subject: [PATCH 085/104] Update README - add a note for updaint the TF client --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 896fa8494..16a0eb2e6 100644 --- a/README.md +++ b/README.md @@ -238,3 +238,5 @@ This happens because TF Framework treats all `computed` attributes as `unknown` However, it doesn't mean that all attributes that marked as `unknown` in the plan will get new values after apply. To mitigitate the problem, the provider uses plan modifiers that is a recommended way to reduce plan output. However, currently plan modifiers don't cover all the `computed` attributes. + +Please make sure to update to the latest TF client version. 
From 910ebc00e9fb79cb112d146d36c5f9c966d781b5 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 18 Jan 2023 16:12:21 +0100 Subject: [PATCH 086/104] Fixes for topologies as a set --- build/Makefile.test | 2 +- .../deployment/v2/deployment_read_test.go | 118 +++++++++--------- .../v2/elasticsearch_read_test.go | 16 +-- .../v2/elasticsearch_topology.go | 15 ++- ec/ecresource/deploymentresource/read.go | 2 +- 5 files changed, 80 insertions(+), 73 deletions(-) diff --git a/build/Makefile.test b/build/Makefile.test index 90a4a1763..bf7c0f02e 100644 --- a/build/Makefile.test +++ b/build/Makefile.test @@ -3,7 +3,7 @@ SWEEP_DIR ?= $(TEST_ACC) SWEEP_CI_RUN_FILTER ?= ec_deployments TEST ?= ./... TEST_COUNT ?= 1 -TESTUNITARGS ?= -timeout 5m -race -cover -coverprofile=reports/c.out +TESTUNITARGS ?= -timeout 10m -race -cover -coverprofile=reports/c.out TEST_ACC ?= github.com/elastic/terraform-provider-ec/ec/acc TEST_NAME ?= TestAcc TEST_ACC_PARALLEL = 6 diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go index a49e2644f..af05c53a5 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go @@ -1188,6 +1188,18 @@ func Test_readDeployment(t *testing.T) { HttpsEndpoint: ec.String("https://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "coordinating", + InstanceConfigurationId: ec.String("gcp.coordinating.1"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("false"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("false"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, { Id: "hot_content", 
InstanceConfigurationId: ec.String("gcp.data.highio.1"), @@ -1204,23 +1216,6 @@ func Test_readDeployment(t *testing.T) { PolicyOverrideJson: ec.String(`{"proactive_storage":{"forecast_window":"3 h"}}`), }, }, - { - Id: "ml", - InstanceConfigurationId: ec.String("gcp.ml.1"), - Size: ec.String("1g"), - SizeResource: ec.String("memory"), - NodeTypeData: ec.String("false"), - NodeTypeIngest: ec.String("false"), - NodeTypeMaster: ec.String("false"), - NodeTypeMl: ec.String("true"), - ZoneCount: 1, - Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ - MaxSize: ec.String("30g"), - MaxSizeResource: ec.String("memory"), - MinSize: ec.String("1g"), - MinSizeResource: ec.String("memory"), - }, - }, { Id: "master", InstanceConfigurationId: ec.String("gcp.master.1"), @@ -1234,16 +1229,21 @@ func Test_readDeployment(t *testing.T) { Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, { - Id: "coordinating", - InstanceConfigurationId: ec.String("gcp.coordinating.1"), - Size: ec.String("0g"), + Id: "ml", + InstanceConfigurationId: ec.String("gcp.ml.1"), + Size: ec.String("1g"), SizeResource: ec.String("memory"), NodeTypeData: ec.String("false"), - NodeTypeIngest: ec.String("true"), + NodeTypeIngest: ec.String("false"), NodeTypeMaster: ec.String("false"), - NodeTypeMl: ec.String("false"), - ZoneCount: 2, - Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + NodeTypeMl: ec.String("true"), + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("30g"), + MaxSizeResource: ec.String("memory"), + MinSize: ec.String("1g"), + MinSizeResource: ec.String("memory"), + }, }, }, }, @@ -1293,6 +1293,18 @@ func Test_readDeployment(t *testing.T) { HttpsEndpoint: ec.String("https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "coordinating", + InstanceConfigurationId: 
ec.String("gcp.coordinating.1"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("false"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("false"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, { Id: "hot_content", InstanceConfigurationId: ec.String("gcp.data.highio.1"), @@ -1318,18 +1330,6 @@ func Test_readDeployment(t *testing.T) { ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, - { - Id: "coordinating", - InstanceConfigurationId: ec.String("gcp.coordinating.1"), - Size: ec.String("0g"), - SizeResource: ec.String("memory"), - NodeTypeData: ec.String("false"), - NodeTypeIngest: ec.String("true"), - NodeTypeMaster: ec.String("false"), - NodeTypeMl: ec.String("false"), - ZoneCount: 2, - Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, }, }, Kibana: &kibanav2.Kibana{ @@ -1378,6 +1378,15 @@ func Test_readDeployment(t *testing.T) { HttpsEndpoint: ec.String("https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, Topology: elasticsearchv2.ElasticsearchTopologies{ + { + Id: "coordinating", + InstanceConfigurationId: ec.String("gcp.coordinating.1"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + ZoneCount: 2, + NodeRoles: []string{"ingest", "remote_cluster_client"}, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, { Id: "hot_content", InstanceConfigurationId: ec.String("gcp.data.highio.1"), @@ -1395,16 +1404,13 @@ func Test_readDeployment(t *testing.T) { Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, { - Id: "warm", - InstanceConfigurationId: ec.String("gcp.data.highstorage.1"), - Size: ec.String("4g"), + Id: "master", + InstanceConfigurationId: ec.String("gcp.master.1"), + Size: ec.String("0g"), SizeResource: ec.String("memory"), - ZoneCount: 2, - 
NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + ZoneCount: 3, + NodeRoles: []string{"master"}, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, { Id: "ml", @@ -1416,22 +1422,16 @@ func Test_readDeployment(t *testing.T) { Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, { - Id: "master", - InstanceConfigurationId: ec.String("gcp.master.1"), - Size: ec.String("0g"), - SizeResource: ec.String("memory"), - ZoneCount: 3, - NodeRoles: []string{"master"}, - Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "coordinating", - InstanceConfigurationId: ec.String("gcp.coordinating.1"), - Size: ec.String("0g"), + Id: "warm", + InstanceConfigurationId: ec.String("gcp.data.highstorage.1"), + Size: ec.String("4g"), SizeResource: ec.String("memory"), ZoneCount: 2, - NodeRoles: []string{"ingest", "remote_cluster_client"}, - Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, }, }, }, diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go index dc63edf04..f402b00ce 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go @@ -306,6 +306,14 @@ func Test_readElasticsearchTopology(t *testing.T) { }, }}, want: ElasticsearchTopologies{ + { + Id: "coordinating", + InstanceConfigurationId: ec.String("aws.coordinating.m5"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + ZoneCount: 2, + Autoscaling: &ElasticsearchTopologyAutoscaling{}, + }, { Id: "hot_content", InstanceConfigurationId: ec.String("aws.data.highio.i3"), @@ -317,14 +325,6 @@ 
func Test_readElasticsearchTopology(t *testing.T) { NodeTypeMaster: ec.String("true"), Autoscaling: &ElasticsearchTopologyAutoscaling{}, }, - { - Id: "coordinating", - InstanceConfigurationId: ec.String("aws.coordinating.m5"), - Size: ec.String("0g"), - SizeResource: ec.String("memory"), - ZoneCount: 2, - Autoscaling: &ElasticsearchTopologyAutoscaling{}, - }, }, }, { diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go index 10622e807..d8e0ed990 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go @@ -22,6 +22,7 @@ import ( "encoding/json" "fmt" "reflect" + "sort" "strconv" "strings" @@ -126,17 +127,23 @@ func readElasticsearchTopologies(in *models.ElasticsearchClusterPlan) (Elasticse return nil, nil } - tops := make([]ElasticsearchTopology, 0, len(in.ClusterTopology)) + topology := make([]ElasticsearchTopology, 0, len(in.ClusterTopology)) for _, model := range in.ClusterTopology { - topology, err := readElasticsearchTopology(model) + tier, err := readElasticsearchTopology(model) if err != nil { return nil, err } - tops = append(tops, *topology) + if tier.Id != "" { + topology = append(topology, *tier) + } } - return tops, nil + sort.Slice(topology, func(i, j int) bool { + return topology[i].Id < topology[j].Id + }) + + return topology, nil } func readElasticsearchTopology(model *models.ElasticsearchClusterTopologyElement) (*ElasticsearchTopology, error) { diff --git a/ec/ecresource/deploymentresource/read.go b/ec/ecresource/deploymentresource/read.go index 33959c533..77cddf660 100644 --- a/ec/ecresource/deploymentresource/read.go +++ b/ec/ecresource/deploymentresource/read.go @@ -146,7 +146,7 @@ func (r *Resource) read(ctx context.Context, id string, state *deploymentv2.Depl deployment.ProcessSelfInObservability() - // 
deployment.NullifyUnusedEsTopologies(ctx, elasticsearchPlan) + deployment.NullifyUnusedEsTopologies(ctx, elasticsearchPlan) // ReadDeployment returns empty config struct if there is no config, so we have to nullify it if plan doesn't contain it // we use state for plan in Read and there is no state during import so we need to check elasticsearchPlan against nil From 48d04c3aef6bd9cabc5f416ad33808e8437292bf Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 18 Jan 2023 16:58:10 +0100 Subject: [PATCH 087/104] Fix elasticsearch topology description and remove obsolete func --- .../elasticsearch/v2/elasticsearch_topology.go | 14 -------------- .../deploymentresource/elasticsearch/v2/schema.go | 2 +- 2 files changed, 1 insertion(+), 15 deletions(-) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go index d8e0ed990..e9a7fae14 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go @@ -264,20 +264,6 @@ func (topology *ElasticsearchTopologyTF) HasNodeType() bool { return false } -func objectToTopology(ctx context.Context, obj types.Object) (*ElasticsearchTopologyTF, diag.Diagnostics) { - if obj.IsNull() || obj.IsUnknown() { - return nil, nil - } - - var topology *ElasticsearchTopologyTF - - if diags := tfsdk.ValueAs(ctx, obj, &topology); diags.HasError() { - return nil, diags - } - - return topology, nil -} - type ElasticsearchTopologies []ElasticsearchTopology func matchEsTopologyID(id string, topologies []*models.ElasticsearchClusterTopologyElement) (*models.ElasticsearchClusterTopologyElement, error) { diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go index b2ea3665e..5c43958aa 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go 
+++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -358,7 +358,7 @@ func ElasticsearchTopologySchema() tfsdk.Attribute { return tfsdk.Attribute{ Optional: true, Computed: true, - Description: `Optional topology element which must be set once but can be set multiple times to compose complex topologies`, + Description: `Elasticsearch topology`, Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ "id": { Type: types.StringType, From 3d6835c2d88b36f6750657372e00e428ba7e104d Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 19 Jan 2023 09:38:36 +0100 Subject: [PATCH 088/104] Fixes for NullifyUnusedEsTopologies --- .../deploymentresource/deployment/v2/deployment_read.go | 4 ++-- ec/ecresource/deploymentresource/read.go | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go index 45508ac0a..7146934b8 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go @@ -70,13 +70,13 @@ func (dep *Deployment) NullifyUnusedEsTopologies(ctx context.Context, esPlan *el } var planTopology elasticsearchv2.ElasticsearchTopologiesTF - if diags := esPlan.Topology.ElementsAs(ctx, planTopology, true); diags.HasError() { + if diags := esPlan.Topology.ElementsAs(ctx, &planTopology, true); diags.HasError() { return diags } planTopologiesSet := planTopology.AsSet() - filteredTopologies := make(elasticsearchv2.ElasticsearchTopologies, len(dep.Elasticsearch.Topology)) + filteredTopologies := make(elasticsearchv2.ElasticsearchTopologies, 0, len(dep.Elasticsearch.Topology)) for _, tier := range dep.Elasticsearch.Topology { planTier := planTopologiesSet[tier.Id] diff --git a/ec/ecresource/deploymentresource/read.go b/ec/ecresource/deploymentresource/read.go index 77cddf660..5db891b63 100644 --- 
a/ec/ecresource/deploymentresource/read.go +++ b/ec/ecresource/deploymentresource/read.go @@ -146,7 +146,9 @@ func (r *Resource) read(ctx context.Context, id string, state *deploymentv2.Depl deployment.ProcessSelfInObservability() - deployment.NullifyUnusedEsTopologies(ctx, elasticsearchPlan) + if diags := deployment.NullifyUnusedEsTopologies(ctx, elasticsearchPlan); diags.HasError() { + return nil, diags + } // ReadDeployment returns empty config struct if there is no config, so we have to nullify it if plan doesn't contain it // we use state for plan in Read and there is no state during import so we need to check elasticsearchPlan against nil From a2d6a62effb2dfdcad78f1a13ed1ccb7a6f09b3d Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 19 Jan 2023 09:43:31 +0100 Subject: [PATCH 089/104] Fix examples --- examples/deployment/outputs.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/deployment/outputs.tf b/examples/deployment/outputs.tf index 0d63eb92d..5fd329cf1 100644 --- a/examples/deployment/outputs.tf +++ b/examples/deployment/outputs.tf @@ -3,11 +3,11 @@ output "elasticsearch_version" { } output "elasticsearch_cloud_id" { - value = ec_deployment.example_minimal.elasticsearch[0].cloud_id + value = ec_deployment.example_minimal.elasticsearch.cloud_id } output "elasticsearch_https_endpoint" { - value = ec_deployment.example_minimal.elasticsearch[0].https_endpoint + value = ec_deployment.example_minimal.elasticsearch.https_endpoint } output "elasticsearch_username" { From 422d45e6d02739facb2c37c6db63ed3c16e5d33d Mon Sep 17 00:00:00 2001 From: Dmitry Onishchenko <8962171+dimuon@users.noreply.github.com> Date: Mon, 23 Jan 2023 10:59:14 +0100 Subject: [PATCH 090/104] Apply suggestions from code review Co-authored-by: Toby Brain --- ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go 
b/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go index 47ddebe93..52bf4c9eb 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go +++ b/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go @@ -118,7 +118,7 @@ func elasticsearchTopologySchema() tfsdk.Attribute { }, "node_type_master": { Type: types.BoolType, - Description: " Defines whether this node can be elected master (<8.0).", + Description: "Defines whether this node can be elected master (<8.0).", Computed: true, }, "node_type_ingest": { @@ -186,7 +186,6 @@ func elasticsearchAutoscalingListType() attr.Type { func elasticsearchAutoscalingAttrTypes() map[string]attr.Type { return elasticsearchAutoscalingListType().(types.ListType).ElemType.(types.ObjectType).AttrTypes - } type elasticsearchResourceInfoModelV0 struct { From d53e5960bf591776a2755b2924be21593acbe4af Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Mon, 23 Jan 2023 15:26:14 +0100 Subject: [PATCH 091/104] makes elasticsearch topology required --- ec/ecresource/deploymentresource/elasticsearch/v2/schema.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go index 5c43958aa..56939a299 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -356,8 +356,9 @@ func ElasticsearchTrustExternalSchema() tfsdk.Attribute { func ElasticsearchTopologySchema() tfsdk.Attribute { return tfsdk.Attribute{ - Optional: true, - Computed: true, + Required: true, + // Optional: true, + // Computed: true, Description: `Elasticsearch topology`, Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ "id": { From ef67aa5aa49034c815085e261e8ad29fade4499e Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Mon, 23 Jan 2023 16:27:42 +0100 Subject: [PATCH 092/104] Addres PR comments - fix 
descriptions and scope of definitions --- .../v2/elasticsearch_read_test.go | 2 +- .../elasticsearch/v2/schema.go | 40 +++++++++---------- .../enterprisesearch/v1/schema.go | 4 +- .../enterprisesearch/v2/schema.go | 4 +- .../integrationsserver/v1/schema.go | 4 +- .../integrationsserver/v2/schema.go | 10 ++--- .../deploymentresource/kibana/v1/schema.go | 4 +- .../deploymentresource/kibana/v2/schema.go | 4 +- ec/internal/converters/extract_endpoint.go | 4 +- 9 files changed, 38 insertions(+), 38 deletions(-) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go index 9900db03e..c3e9965e2 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go @@ -422,7 +422,7 @@ func Test_readElasticsearchConfig(t *testing.T) { assert.Equal(t, tt.want, got) var config types.Object - diags := tfsdk.ValueFrom(context.Background(), got, ElasticsearchConfigSchema().FrameworkType(), &config) + diags := tfsdk.ValueFrom(context.Background(), got, elasticsearchConfigSchema().FrameworkType(), &config) assert.Nil(t, diags) }) } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go index 4cdea11d2..386a0b438 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -96,25 +96,25 @@ func ElasticsearchSchema() tfsdk.Attribute { Computed: true, }, - "hot": ElasticsearchTopologySchema("'hot' topology element", true, "hot"), - "coordinating": ElasticsearchTopologySchema("'coordinating' topology element", false, "coordinating"), - "master": ElasticsearchTopologySchema("'master' topology element", false, "master"), - "warm": ElasticsearchTopologySchema("'warm' topology element", false, "warm"), - "cold": 
ElasticsearchTopologySchema("'cold' topology element", false, "cold"), - "frozen": ElasticsearchTopologySchema("'frozen' topology element", false, "frozen"), - "ml": ElasticsearchTopologySchema("'ml' topology element", false, "ml"), + "hot": elasticsearchTopologySchema("'hot' topology element", true, "hot"), + "coordinating": elasticsearchTopologySchema("'coordinating' topology element", false, "coordinating"), + "master": elasticsearchTopologySchema("'master' topology element", false, "master"), + "warm": elasticsearchTopologySchema("'warm' topology element", false, "warm"), + "cold": elasticsearchTopologySchema("'cold' topology element", false, "cold"), + "frozen": elasticsearchTopologySchema("'frozen' topology element", false, "frozen"), + "ml": elasticsearchTopologySchema("'ml' topology element", false, "ml"), - "trust_account": ElasticsearchTrustAccountSchema(), + "trust_account": elasticsearchTrustAccountSchema(), - "trust_external": ElasticsearchTrustExternalSchema(), + "trust_external": elasticsearchTrustExternalSchema(), - "config": ElasticsearchConfigSchema(), + "config": elasticsearchConfigSchema(), "remote_cluster": ElasticsearchRemoteClusterSchema(), - "snapshot_source": ElasticsearchSnapshotSourceSchema(), + "snapshot_source": elasticsearchSnapshotSourceSchema(), - "extension": ElasticsearchExtensionSchema(), + "extension": elasticsearchExtensionSchema(), "strategy": { Description: "Configuration strategy type " + strings.Join(strategiesList, ", "), @@ -126,7 +126,7 @@ func ElasticsearchSchema() tfsdk.Attribute { } } -func ElasticsearchConfigSchema() tfsdk.Attribute { +func elasticsearchConfigSchema() tfsdk.Attribute { return tfsdk.Attribute{ Description: `Optional Elasticsearch settings which will be applied to all topologies`, Optional: true, @@ -168,7 +168,7 @@ func ElasticsearchConfigSchema() tfsdk.Attribute { } } -func ElasticsearchTopologyAutoscalingSchema(topologyAttributeName string) tfsdk.Attribute { +func 
elasticsearchTopologyAutoscalingSchema(topologyAttributeName string) tfsdk.Attribute { return tfsdk.Attribute{ Description: "Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources.", Required: true, @@ -262,7 +262,7 @@ func ElasticsearchRemoteClusterSchema() tfsdk.Attribute { } } -func ElasticsearchSnapshotSourceSchema() tfsdk.Attribute { +func elasticsearchSnapshotSourceSchema() tfsdk.Attribute { return tfsdk.Attribute{ Description: "Optional snapshot source settings. Restore data from a snapshot of another deployment.", Optional: true, @@ -285,7 +285,7 @@ func ElasticsearchSnapshotSourceSchema() tfsdk.Attribute { } } -func ElasticsearchExtensionSchema() tfsdk.Attribute { +func elasticsearchExtensionSchema() tfsdk.Attribute { return tfsdk.Attribute{ Description: "Optional Elasticsearch extensions such as custom bundles or plugins.", Optional: true, @@ -315,7 +315,7 @@ func ElasticsearchExtensionSchema() tfsdk.Attribute { } } -func ElasticsearchTrustAccountSchema() tfsdk.Attribute { +func elasticsearchTrustAccountSchema() tfsdk.Attribute { return tfsdk.Attribute{ Description: "Optional Elasticsearch account trust settings.", Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ @@ -345,7 +345,7 @@ func ElasticsearchTrustAccountSchema() tfsdk.Attribute { } } -func ElasticsearchTrustExternalSchema() tfsdk.Attribute { +func elasticsearchTrustExternalSchema() tfsdk.Attribute { return tfsdk.Attribute{ Description: "Optional Elasticsearch external trust settings.", Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ @@ -375,7 +375,7 @@ func ElasticsearchTrustExternalSchema() tfsdk.Attribute { } } -func ElasticsearchTopologySchema(description string, required bool, topologyAttributeName string) tfsdk.Attribute { +func elasticsearchTopologySchema(description string, required bool, topologyAttributeName string) tfsdk.Attribute { return tfsdk.Attribute{ Optional: !required, // it should be Computed but Computed 
triggers TF weird behaviour that leads to unempty plan for zero change config @@ -465,7 +465,7 @@ func ElasticsearchTopologySchema(description string, required bool, topologyAttr UseNodeRolesDefault(), }, }, - "autoscaling": ElasticsearchTopologyAutoscalingSchema(topologyAttributeName), + "autoscaling": elasticsearchTopologyAutoscalingSchema(topologyAttributeName), }), } } diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go b/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go index 565faacaf..112d40cf5 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go @@ -161,12 +161,12 @@ func EnterpriseSearchSchema() tfsdk.Attribute { }, "user_settings_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, Optional: true, }, "user_settings_override_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, Optional: true, }, }), diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go index a3a0b6a06..4b71e8143 100644 --- a/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go @@ -148,12 +148,12 @@ func EnterpriseSearchSchema() tfsdk.Attribute { }, "user_settings_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, Optional: true, }, "user_settings_override_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, Optional: true, }, }), diff --git a/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go b/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go index df10c37ef..69b3285ee 100644 --- a/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go +++ b/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go @@ -150,12 +150,12 @@ func IntegrationsServerSchema() tfsdk.Attribute { }, "user_settings_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, Optional: true, }, "user_settings_override_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, Optional: true, }, }), diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go b/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go index 92a3c0c72..9c258afc9 100644 --- a/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go @@ -107,17 +107,17 @@ func IntegrationsServerSchema() tfsdk.Attribute { }, }, "config": { - Description: `Optionally define the IntegrationsServer configuration options for the IntegrationsServer Server`, + Description: `Optionally define the Integrations Server configuration options for the IntegrationsServer Server`, Optional: true, Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ "docker_image": { Type: types.StringType, - Description: "Optionally override the docker image the IntegrationsServer nodes will use. Note that this field will only work for internal users only.", + Description: "Optionally override the docker image the Integrations Server nodes will use. 
Note that this field will only work for internal users only.", Optional: true, }, "debug_enabled": { Type: types.BoolType, - Description: `Optionally enable debug mode for IntegrationsServer servers - defaults to false`, + Description: `Optionally enable debug mode for Integrations Server instances - defaults to false`, Optional: true, Computed: true, PlanModifiers: []tfsdk.AttributePlanModifier{ @@ -136,12 +136,12 @@ func IntegrationsServerSchema() tfsdk.Attribute { }, "user_settings_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, Optional: true, }, "user_settings_override_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, Optional: true, }, }), diff --git a/ec/ecresource/deploymentresource/kibana/v1/schema.go b/ec/ecresource/deploymentresource/kibana/v1/schema.go index 8d775aae6..a28d447ff 100644 --- a/ec/ecresource/deploymentresource/kibana/v1/schema.go +++ b/ec/ecresource/deploymentresource/kibana/v1/schema.go @@ -140,12 +140,12 @@ func KibanaSchema() tfsdk.Attribute { }, "user_settings_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, Optional: true, }, "user_settings_override_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, Optional: true, }, }), diff --git a/ec/ecresource/deploymentresource/kibana/v2/schema.go b/ec/ecresource/deploymentresource/kibana/v2/schema.go index f7380236c..a9bcb885d 100644 --- a/ec/ecresource/deploymentresource/kibana/v2/schema.go +++ b/ec/ecresource/deploymentresource/kibana/v2/schema.go @@ -127,12 +127,12 @@ func KibanaSchema() tfsdk.Attribute { }, "user_settings_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, Optional: true, }, "user_settings_override_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, Optional: true, }, }), diff --git a/ec/internal/converters/extract_endpoint.go b/ec/internal/converters/extract_endpoint.go index 32077952c..be32ecdf8 100644 --- a/ec/internal/converters/extract_endpoint.go +++ b/ec/internal/converters/extract_endpoint.go @@ -25,7 +25,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) -// FlattenClusterEndpoint receives a ClusterMetadataInfo, parses the http and +// ExtractEndpointsToTypes receives a ClusterMetadataInfo, parses the http and // https endpoints and returns a map with two keys: `http_endpoint` and // `https_endpoint` func ExtractEndpointsToTypes(metadata *models.ClusterMetadataInfo) (httpEndpoint, httpsEndpoint types.String) { @@ -42,7 +42,7 @@ func ExtractEndpointsToTypes(metadata *models.ClusterMetadataInfo) (httpEndpoint return } -// FlattenClusterEndpoint receives a ClusterMetadataInfo, parses the http and +// ExtractEndpoints receives a ClusterMetadataInfo, parses the http and // https endpoints and returns a map with two keys: `http_endpoint` and // `https_endpoint` func ExtractEndpoints(metadata *models.ClusterMetadataInfo) (httpEndpoint, httpsEndpoint *string) { From 2fef8e6df1d953a1e75c8fe068f62f6c3196d615 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Mon, 23 Jan 2023 18:29:54 +0100 Subject: [PATCH 093/104] update comment --- .../elasticsearch/v2/elasticsearch_topology.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go index 688e9fab4..d216e89be 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go @@ -361,10 +361,6 @@ func elasticsearchTopologyAutoscalingPayload(ctx context.Context, autoObj attr.V 
} // expandAutoscalingDimension centralises processing of %_size and %_size_resource attributes -// Due to limitations in the Terraform SDK, it's not possible to specify a Default on a Computed schema member -// to work around this limitation, this function will default the %_size_resource attribute to `memory`. -// Without this default, setting autoscaling limits on tiers which do not have those limits in the deployment -// template leads to an API error due to the empty resource field on the TopologySize model. func expandAutoscalingDimension(autoscale v1.ElasticsearchTopologyAutoscalingTF, model *models.TopologySize, size, sizeResource types.String) error { if size.Value != "" { val, err := deploymentsize.ParseGb(size.Value) From 5e314652946e83f110e6e6c27b6dab57cb6ecd0a Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 24 Jan 2023 09:36:17 +0100 Subject: [PATCH 094/104] fix description --- ec/ecresource/deploymentresource/apm/v1/schema.go | 4 ++-- ec/ecresource/deploymentresource/apm/v2/schema.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ec/ecresource/deploymentresource/apm/v1/schema.go b/ec/ecresource/deploymentresource/apm/v1/schema.go index e5882f2df..07aaeb41c 100644 --- a/ec/ecresource/deploymentresource/apm/v1/schema.go +++ b/ec/ecresource/deploymentresource/apm/v1/schema.go @@ -102,12 +102,12 @@ func ApmConfigSchema() tfsdk.Attribute { }, "user_settings_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, Optional: true, }, "user_settings_override_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, Optional: true, }, }), diff --git a/ec/ecresource/deploymentresource/apm/v2/schema.go b/ec/ecresource/deploymentresource/apm/v2/schema.go index 2cd1f9ee2..d73d2d67e 100644 --- a/ec/ecresource/deploymentresource/apm/v2/schema.go +++ b/ec/ecresource/deploymentresource/apm/v2/schema.go @@ -55,12 +55,12 @@ func ApmConfigSchema() tfsdk.Attribute { }, "user_settings_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, Optional: true, }, "user_settings_override_yaml": { Type: types.StringType, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, Optional: true, }, }), From 75c16c385460f29d1e7428ed5dcb988298407403 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 24 Jan 2023 09:45:27 +0100 Subject: [PATCH 095/104] remove incorrect comments --- ec/internal/converters/extract_endpoint.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/ec/internal/converters/extract_endpoint.go b/ec/internal/converters/extract_endpoint.go index be32ecdf8..23f1723d4 100644 --- a/ec/internal/converters/extract_endpoint.go +++ b/ec/internal/converters/extract_endpoint.go @@ -25,9 +25,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) -// ExtractEndpointsToTypes receives a ClusterMetadataInfo, parses the http and -// https endpoints and returns a map with two keys: `http_endpoint` and -// `https_endpoint` func ExtractEndpointsToTypes(metadata *models.ClusterMetadataInfo) (httpEndpoint, httpsEndpoint types.String) { httpEndpointStr, httpsEndpointStr := ExtractEndpoints(metadata) @@ -42,9 +39,6 @@ func ExtractEndpointsToTypes(metadata *models.ClusterMetadataInfo) (httpEndpoint return } -// ExtractEndpoints receives a ClusterMetadataInfo, parses the http and -// https endpoints and returns a map with two keys: `http_endpoint` and -// `https_endpoint` func ExtractEndpoints(metadata *models.ClusterMetadataInfo) (httpEndpoint, httpsEndpoint *string) { if metadata == nil || metadata.Endpoint == "" || metadata.Ports == nil { return From 942263834dd7308726dc4354f42346abdd1894a9 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 24 Jan 2023 16:55:32 +0100 Subject: [PATCH 096/104] (WIP) use map for Elasticsearch topologies --- .../deployment/v2/deployment_read.go | 11 ++++----- .../elasticsearch/v2/elasticsearch_payload.go | 10 ++++---- .../v2/elasticsearch_topology.go | 23 ++++--------------- .../elasticsearch/v2/node_roles.go | 2 +- .../elasticsearch/v2/schema.go | 4 ++-- 5 files 
changed, 16 insertions(+), 34 deletions(-) diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go index 7146934b8..9b0589061 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go @@ -74,12 +74,11 @@ func (dep *Deployment) NullifyUnusedEsTopologies(ctx context.Context, esPlan *el return diags } - planTopologiesSet := planTopology.AsSet() - - filteredTopologies := make(elasticsearchv2.ElasticsearchTopologies, 0, len(dep.Elasticsearch.Topology)) + filteredTopologies := make(elasticsearchv2.ElasticsearchTopologies, len(dep.Elasticsearch.Topology)) for _, tier := range dep.Elasticsearch.Topology { - planTier := planTopologiesSet[tier.Id] + _, exist := planTopology[tier.Id] + size, err := converters.ParseTopologySize(tier.Size, tier.SizeResource) if err != nil { @@ -94,11 +93,11 @@ func (dep *Deployment) NullifyUnusedEsTopologies(ctx context.Context, esPlan *el return diags } - if planTier == nil && *size.Value == 0 { + if !exist && *size.Value == 0 { continue } - filteredTopologies = append(filteredTopologies, tier) + filteredTopologies[tier.Id] = tier } dep.Elasticsearch.Topology = filteredTopologies diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go index 27941da48..47dd91abe 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go @@ -37,7 +37,7 @@ type ElasticsearchTF struct { CloudID types.String `tfsdk:"cloud_id"` HttpEndpoint types.String `tfsdk:"http_endpoint"` HttpsEndpoint types.String `tfsdk:"https_endpoint"` - Topology types.Set `tfsdk:"topology"` + Topology types.Map `tfsdk:"topology"` Config types.Object `tfsdk:"config"` RemoteCluster types.Set 
`tfsdk:"remote_cluster"` SnapshotSource types.Object `tfsdk:"snapshot_source"` @@ -119,8 +119,8 @@ func (es *ElasticsearchTF) payload(ctx context.Context, res *models.Elasticsearc return res, diags } -func (es *ElasticsearchTF) topologies(ctx context.Context) ([]*ElasticsearchTopologyTF, diag.Diagnostics) { - var topologies []*ElasticsearchTopologyTF +func (es *ElasticsearchTF) topologies(ctx context.Context) (map[string]ElasticsearchTopologyTF, diag.Diagnostics) { + var topologies map[string]ElasticsearchTopologyTF if diags := es.Topology.ElementsAs(ctx, &topologies, true); diags.HasError() { return nil, diags } @@ -136,9 +136,7 @@ func (es *ElasticsearchTF) topologiesPayload(ctx context.Context, topologyModels } for _, tier := range tiers { - if tier != nil { - diags.Append(tier.payload(ctx, tier.Id.Value, topologyModels)...) - } + diags.Append(tier.payload(ctx, tier.Id.Value, topologyModels)...) } return diags diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go index e9a7fae14..6a212d07e 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go @@ -22,7 +22,6 @@ import ( "encoding/json" "fmt" "reflect" - "sort" "strconv" "strings" @@ -68,17 +67,7 @@ type ElasticsearchTopology struct { type ElasticsearchTopologyAutoscaling v1.ElasticsearchTopologyAutoscaling -type ElasticsearchTopologiesTF []*ElasticsearchTopologyTF - -func (tops ElasticsearchTopologiesTF) AsSet() map[string]*ElasticsearchTopologyTF { - set := make(map[string]*ElasticsearchTopologyTF, len(tops)) - - for _, top := range tops { - set[top.Id.Value] = top - } - - return set -} +type ElasticsearchTopologiesTF map[string]ElasticsearchTopologyTF func (topology ElasticsearchTopologyTF) payload(ctx context.Context, topologyID string, planTopologies 
[]*models.ElasticsearchClusterTopologyElement) diag.Diagnostics { var diags diag.Diagnostics @@ -127,7 +116,7 @@ func readElasticsearchTopologies(in *models.ElasticsearchClusterPlan) (Elasticse return nil, nil } - topology := make([]ElasticsearchTopology, 0, len(in.ClusterTopology)) + topology := make(map[string]ElasticsearchTopology, len(in.ClusterTopology)) for _, model := range in.ClusterTopology { tier, err := readElasticsearchTopology(model) @@ -135,14 +124,10 @@ func readElasticsearchTopologies(in *models.ElasticsearchClusterPlan) (Elasticse return nil, err } if tier.Id != "" { - topology = append(topology, *tier) + topology[tier.Id] = *tier } } - sort.Slice(topology, func(i, j int) bool { - return topology[i].Id < topology[j].Id - }) - return topology, nil } @@ -264,7 +249,7 @@ func (topology *ElasticsearchTopologyTF) HasNodeType() bool { return false } -type ElasticsearchTopologies []ElasticsearchTopology +type ElasticsearchTopologies map[string]ElasticsearchTopology func matchEsTopologyID(id string, topologies []*models.ElasticsearchClusterTopologyElement) (*models.ElasticsearchClusterTopologyElement, error) { for _, t := range topologies { diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go index 9db6baacf..fd2b5dd1f 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go @@ -112,7 +112,7 @@ func legacyToNodeRoles(ctx context.Context, stateVersion, planVersion types.Stri } for _, tier := range tiers { - if tier != nil && tier.HasNodeType() { + if tier.HasNodeType() { return false, nil } } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go index 56939a299..9a94a8c75 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go +++ 
b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -357,10 +357,10 @@ func ElasticsearchTrustExternalSchema() tfsdk.Attribute { func ElasticsearchTopologySchema() tfsdk.Attribute { return tfsdk.Attribute{ Required: true, - // Optional: true, + // Optional: true, // Computed: true, Description: `Elasticsearch topology`, - Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + Attributes: tfsdk.MapNestedAttributes(map[string]tfsdk.Attribute{ "id": { Type: types.StringType, Description: `Required topology ID from the deployment template`, From e2216104eeb6fdf4e01a3eec9da030c2dcb8d989 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 24 Jan 2023 16:57:55 +0100 Subject: [PATCH 097/104] Remove obsolete plan modifier for elasticsearch topology zone_count --- ec/ecresource/deploymentresource/elasticsearch/v2/schema.go | 1 - 1 file changed, 1 deletion(-) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go index 386a0b438..fd86f9823 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -415,7 +415,6 @@ func elasticsearchTopologySchema(description string, required bool, topologyAttr Computed: true, Optional: true, PlanModifiers: tfsdk.AttributePlanModifiers{ - resource.UseStateForUnknown(), UseTopologyStateForUnknown(topologyAttributeName), }, }, From 89760d2514f9cf1cb26056f1b47a1c111739b299 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Tue, 24 Jan 2023 19:22:38 +0100 Subject: [PATCH 098/104] Fix and enable plan modifiers for topology attributes --- .../elasticsearch/v2/schema.go | 46 ++++++++++++++++++- .../v2/topology_plan_modifier.go | 13 ++++-- 2 files changed, 53 insertions(+), 6 deletions(-) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go index 9a94a8c75..3cff52c43 
100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -96,7 +96,7 @@ func ElasticsearchSchema() tfsdk.Attribute { Computed: true, }, - "topology": ElasticsearchTopologySchema(), + "topology": elasticsearchTopologySchema(), "trust_account": ElasticsearchTrustAccountSchema(), @@ -172,29 +172,44 @@ func ElasticsearchTopologyAutoscalingSchema() tfsdk.Attribute { Type: types.StringType, Optional: true, Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(2), + }, }, "max_size": { Description: "Maximum size value for the maximum autoscaling setting.", Type: types.StringType, Optional: true, Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(2), + }, }, "min_size_resource": { Description: "Minimum resource type for the minimum autoscaling setting.", Type: types.StringType, Optional: true, Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(2), + }, }, "min_size": { Description: "Minimum size value for the minimum autoscaling setting.", Type: types.StringType, Optional: true, Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(2), + }, }, "policy_override_json": { Type: types.StringType, Description: "Computed policy overrides set directly via the API or other clients.", Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(2), + }, }, }), } @@ -354,7 +369,7 @@ func ElasticsearchTrustExternalSchema() tfsdk.Attribute { } } -func ElasticsearchTopologySchema() tfsdk.Attribute { +func elasticsearchTopologySchema() tfsdk.Attribute { return tfsdk.Attribute{ Required: true, // Optional: true, @@ -370,48 +385,72 @@ func ElasticsearchTopologySchema() tfsdk.Attribute { Type: types.StringType, Description: `Computed Instance Configuration ID of the topology element`, Computed: true, + PlanModifiers: 
tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(1), + }, }, "size": { Type: types.StringType, Description: `Amount of "size_resource" per node in the "g" notation`, Computed: true, Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(1), + }, }, "size_resource": { Type: types.StringType, Description: `Size type, defaults to "memory".`, Optional: true, Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + }, }, "zone_count": { Type: types.Int64Type, Description: `Number of zones that the Elasticsearch cluster will span. This is used to set HA`, Computed: true, Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(1), + }, }, "node_type_data": { Type: types.StringType, Description: `The node type for the Elasticsearch Topology element (data node)`, Computed: true, Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseNodeTypesDefault(), + }, }, "node_type_master": { Type: types.StringType, Description: `The node type for the Elasticsearch Topology element (master node)`, Computed: true, Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseNodeTypesDefault(), + }, }, "node_type_ingest": { Type: types.StringType, Description: `The node type for the Elasticsearch Topology element (ingest node)`, Computed: true, Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseNodeTypesDefault(), + }, }, "node_type_ml": { Type: types.StringType, Description: `The node type for the Elasticsearch Topology element (machine learning node)`, Computed: true, Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseNodeTypesDefault(), + }, }, "node_roles": { Type: types.SetType{ @@ -419,6 +458,9 @@ func ElasticsearchTopologySchema() tfsdk.Attribute { }, Description: `The computed list of node roles for the current topology element`, Computed: true, + PlanModifiers: 
tfsdk.AttributePlanModifiers{ + UseNodeRolesDefault(), + }, }, "autoscaling": ElasticsearchTopologyAutoscalingSchema(), }), diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go index 335853c98..a2773ec64 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go @@ -27,12 +27,12 @@ import ( ) // Use current state for a topology's attribute if the topology's state is not nil and the template attribute has not changed -func UseTopologyStateForUnknown(topologyAttributeName string) tfsdk.AttributePlanModifier { - return useTopologyState{topologyAttributeName: topologyAttributeName} +func UseTopologyStateForUnknown(topologyNestingLevel int) tfsdk.AttributePlanModifier { + return useTopologyState{topologyNestingLevel: topologyNestingLevel} } type useTopologyState struct { - topologyAttributeName string + topologyNestingLevel int } func (m useTopologyState) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { @@ -49,9 +49,14 @@ func (m useTopologyState) Modify(ctx context.Context, req tfsdk.ModifyAttributeP return } + tierPath := req.AttributePath.ParentPath() + for i := m.topologyNestingLevel - 1; i > 0; i-- { + tierPath = tierPath.ParentPath() + } + // we check state of entire topology state instead of topology attributes states because nil can be a valid state for some topology attributes // e.g. `aws-io-optimized-v2` template doesn't specify `autoscaling_min` for `hot_content` so `min_size`'s state is nil - topologyStateDefined, diags := attributeStateDefined(ctx, path.Root("elasticsearch").AtName(m.topologyAttributeName), req) + topologyStateDefined, diags := attributeStateDefined(ctx, tierPath, req) resp.Diagnostics.Append(diags...) 
From 82e869caa4c6d54b9ecb28aa9b4919a8261a6e4e Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 25 Jan 2023 12:12:43 +0100 Subject: [PATCH 099/104] stop exporting a function --- ec/ecresource/deploymentresource/elasticsearch/v2/schema.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go index 99dcc1781..fbf881974 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -162,7 +162,7 @@ func elasticsearchConfigSchema() tfsdk.Attribute { } } -func ElasticsearchTopologyAutoscalingSchema() tfsdk.Attribute { +func elasticsearchTopologyAutoscalingSchema() tfsdk.Attribute { return tfsdk.Attribute{ Description: "Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources.", Required: true, @@ -462,7 +462,7 @@ func elasticsearchTopologySchema() tfsdk.Attribute { UseNodeRolesDefault(), }, }, - "autoscaling": ElasticsearchTopologyAutoscalingSchema(), + "autoscaling": elasticsearchTopologyAutoscalingSchema(), }), } } From 53397aac05e1ec1f34ce954f9a8adc44182756a1 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 25 Jan 2023 13:23:58 +0100 Subject: [PATCH 100/104] fix unit tests --- .../v2/deployment_create_payload_test.go | 153 +++++++----------- .../deployment/v2/deployment_read.go | 8 +- .../deployment/v2/deployment_read_test.go | 116 ++++++------- .../v2/deployment_update_payload_test.go | 147 ++++++++--------- .../deploymentresource/deployment_test.go | 9 +- .../elasticsearch/v2/elasticsearch_payload.go | 4 +- .../v2/elasticsearch_payload_test.go | 94 ++++------- .../v2/elasticsearch_read_test.go | 24 +-- .../v2/elasticsearch_test_utils.go | 2 +- .../v2/elasticsearch_topology.go | 9 +- .../elasticsearch/v2/node_roles_test.go | 3 +- .../elasticsearch/v2/schema.go | 15 +- 12 files changed, 234 
insertions(+), 350 deletions(-) diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go index 327c0dae1..e601e0a35 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go @@ -65,7 +65,7 @@ func Test_createRequest(t *testing.T) { defaultElasticsearch := &elasticsearchv2.Elasticsearch{ Topology: elasticsearchv2.ElasticsearchTopologies{ - *defaultHotTier, + "hot_content": *defaultHotTier, }, } @@ -129,8 +129,7 @@ func Test_createRequest(t *testing.T) { UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), }, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("2g"), NodeRoles: []string{ "master", @@ -142,9 +141,8 @@ func Test_createRequest(t *testing.T) { }, ZoneCount: 1, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "warm", + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("2g"), NodeRoles: []string{ "data_warm", @@ -152,7 +150,7 @@ func Test_createRequest(t *testing.T) { }, ZoneCount: 1, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: sampleKibana, @@ -173,8 +171,7 @@ func Test_createRequest(t *testing.T) { UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), }, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("2g"), NodeTypeData: ec.String("true"), @@ -183,7 +180,7 @@ func Test_createRequest(t *testing.T) 
{ NodeTypeMl: ec.String("false"), ZoneCount: 1, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, } @@ -890,11 +887,10 @@ func Test_createRequest(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("4g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -1052,7 +1048,7 @@ func Test_createRequest(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - *defaultHotTier, + "hot_content": *defaultHotTier, }, }, Kibana: &kibanav2.Kibana{ @@ -1452,7 +1448,7 @@ func Test_createRequest(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - *defaultHotTier, + "hot_content": *defaultHotTier, }, Extension: elasticsearchv2.ElasticsearchExtensions{ { @@ -1608,21 +1604,18 @@ func Test_createRequest(t *testing.T) { RefId: ec.String("main-elasticsearch"), Autoscale: ec.Bool(true), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "cold", + }), + "cold": *elasticsearchv2.CreateTierForTest("cold", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("2g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "warm", + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("4g"), Autoscaling: 
&elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -1756,25 +1749,22 @@ func Test_createRequest(t *testing.T) { RefId: ec.String("main-elasticsearch"), Autoscale: ec.Bool(true), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("232g"), }, - }, - { - Id: "cold", + }), + "cold": *elasticsearchv2.CreateTierForTest("cold", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("2g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "warm", + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("4g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("116g"), }, - }, + }), }, }, }, @@ -1907,27 +1897,23 @@ func Test_createRequest(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "cold", + }), + "cold": *elasticsearchv2.CreateTierForTest("cold", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("2g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "warm", + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("4g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "master", + }), + "master": *elasticsearchv2.CreateTierForTest("master", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("1g"), ZoneCount: 3, Autoscaling: 
&elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -2079,27 +2065,23 @@ func Test_createRequest(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "cold", + }), + "cold": *elasticsearchv2.CreateTierForTest("cold", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("2g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "warm", + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("4g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "coordinating", + }), + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("2g"), ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -2251,33 +2233,28 @@ func Test_createRequest(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "cold", + }), + "cold": *elasticsearchv2.CreateTierForTest("cold", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("2g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "warm", + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("4g"), Autoscaling: 
&elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "coordinating", + }), + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("2g"), ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "master", + }), + "master": *elasticsearchv2.CreateTierForTest("master", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("1g"), ZoneCount: 3, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -2458,11 +2435,10 @@ func Test_createRequest(t *testing.T) { }, }, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -2654,25 +2630,22 @@ func Test_createRequest(t *testing.T) { }, }, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("232g"), }, - }, - { - Id: "cold", + }), + "cold": *elasticsearchv2.CreateTierForTest("cold", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("2g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "warm", + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("4g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("116g"), }, - }, + }), }, }, }, @@ -2833,7 +2806,7 @@ func Test_createRequest(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - *defaultHotTier, + 
"hot_content": *defaultHotTier, }, }, Kibana: &kibanav2.Kibana{ @@ -2921,11 +2894,10 @@ func Test_createRequest(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Tags: map[string]string{ @@ -3010,11 +2982,10 @@ func Test_createRequest(t *testing.T) { SourceElasticsearchClusterId: "8c63b87af9e24ea49b8a4bfe550e5fe9", }, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go index 9b0589061..fa8ecd8cc 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go @@ -76,8 +76,8 @@ func (dep *Deployment) NullifyUnusedEsTopologies(ctx context.Context, esPlan *el filteredTopologies := make(elasticsearchv2.ElasticsearchTopologies, len(dep.Elasticsearch.Topology)) - for _, tier := range dep.Elasticsearch.Topology { - _, exist := planTopology[tier.Id] + for id, tier := range dep.Elasticsearch.Topology { + _, exist := planTopology[id] size, err := converters.ParseTopologySize(tier.Size, tier.SizeResource) @@ -89,7 +89,7 @@ func (dep *Deployment) NullifyUnusedEsTopologies(ctx context.Context, esPlan *el if size == nil || size.Value == nil { var diags diag.Diagnostics - diags.AddError("Cannot remove unused Elasticsearch topologies from backend response", fmt.Sprintf("the topology 
[%s] size is nil", tier.Id)) + diags.AddError("Cannot remove unused Elasticsearch topologies from backend response", fmt.Sprintf("the topology [%s] size is nil", id)) return diags } @@ -97,7 +97,7 @@ func (dep *Deployment) NullifyUnusedEsTopologies(ctx context.Context, esPlan *el continue } - filteredTopologies[tier.Id] = tier + filteredTopologies[id] = tier } dep.Elasticsearch.Topology = filteredTopologies diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go index af05c53a5..c57b75abb 100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go @@ -63,8 +63,7 @@ func Test_readDeployment(t *testing.T) { UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), }, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("2g"), SizeResource: ec.String("memory"), @@ -74,7 +73,7 @@ func Test_readDeployment(t *testing.T) { NodeTypeMl: ec.String("false"), ZoneCount: 1, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -435,8 +434,7 @@ func Test_readDeployment(t *testing.T) { UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), }, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("2g"), SizeResource: ec.String("memory"), @@ -446,7 +444,7 @@ func Test_readDeployment(t *testing.T) { NodeTypeMl: ec.String("false"), ZoneCount: 1, Autoscaling: 
&elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -484,8 +482,7 @@ func Test_readDeployment(t *testing.T) { HttpEndpoint: ec.String("http://1238f19957874af69306787dca662154.eastus2.azure.elastic-cloud.com:9200"), HttpsEndpoint: ec.String("https://1238f19957874af69306787dca662154.eastus2.azure.elastic-cloud.com:9243"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("azure.data.highio.l32sv2"), Size: ec.String("4g"), SizeResource: ec.String("memory"), @@ -495,7 +492,7 @@ func Test_readDeployment(t *testing.T) { NodeTypeMl: ec.String("false"), ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, Config: &elasticsearchv2.ElasticsearchConfig{}, }, @@ -546,8 +543,7 @@ func Test_readDeployment(t *testing.T) { HttpsEndpoint: ec.String("https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("8g"), SizeResource: ec.String("memory"), @@ -557,7 +553,7 @@ func Test_readDeployment(t *testing.T) { NodeTypeMl: ec.String("false"), ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -609,8 +605,7 @@ func Test_readDeployment(t *testing.T) { HttpsEndpoint: ec.String("https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", 
elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("8g"), SizeResource: ec.String("memory"), @@ -620,7 +615,7 @@ func Test_readDeployment(t *testing.T) { NodeTypeMl: ec.String("false"), ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, Extension: elasticsearchv2.ElasticsearchExtensions{ { @@ -755,12 +750,11 @@ func Test_readDeployment(t *testing.T) { Region: ec.String("aws-eu-central-1"), Config: &elasticsearchv2.ElasticsearchConfig{}, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("4g"), SizeResource: ec.String("memory"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, TrustAccount: elasticsearchv2.ElasticsearchTrustAccounts{ { @@ -841,12 +835,11 @@ func Test_readDeployment(t *testing.T) { Region: ec.String("aws-eu-central-1"), Config: &elasticsearchv2.ElasticsearchConfig{}, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("4g"), SizeResource: ec.String("memory"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -989,13 +982,12 @@ func Test_readDeployment(t *testing.T) { DockerImage: ec.String("docker.elastic.com/elasticsearch/cloud:7.14.1-hash"), }, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("4g"), SizeResource: ec.String("memory"), ZoneCount: 1, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -1066,8 +1058,7 @@ func Test_readDeployment(t *testing.T) { HttpsEndpoint: 
ec.String("https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("8g"), SizeResource: ec.String("memory"), @@ -1077,7 +1068,7 @@ func Test_readDeployment(t *testing.T) { NodeTypeMl: ec.String("false"), ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -1127,8 +1118,7 @@ func Test_readDeployment(t *testing.T) { HttpsEndpoint: ec.String("https://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("gcp.data.highio.1"), Size: ec.String("8g"), SizeResource: ec.String("memory"), @@ -1138,7 +1128,7 @@ func Test_readDeployment(t *testing.T) { NodeTypeMl: ec.String("false"), ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -1188,8 +1178,7 @@ func Test_readDeployment(t *testing.T) { HttpsEndpoint: ec.String("https://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "coordinating", + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("gcp.coordinating.1"), Size: ec.String("0g"), SizeResource: ec.String("memory"), @@ -1199,9 +1188,8 @@ func Test_readDeployment(t *testing.T) { NodeTypeMl: 
ec.String("false"), ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "hot_content", + }), + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("gcp.data.highio.1"), Size: ec.String("8g"), SizeResource: ec.String("memory"), @@ -1215,9 +1203,8 @@ func Test_readDeployment(t *testing.T) { MaxSizeResource: ec.String("memory"), PolicyOverrideJson: ec.String(`{"proactive_storage":{"forecast_window":"3 h"}}`), }, - }, - { - Id: "master", + }), + "master": *elasticsearchv2.CreateTierForTest("master", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("gcp.master.1"), Size: ec.String("0g"), SizeResource: ec.String("memory"), @@ -1227,9 +1214,8 @@ func Test_readDeployment(t *testing.T) { NodeTypeMl: ec.String("false"), ZoneCount: 3, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "ml", + }), + "ml": *elasticsearchv2.CreateTierForTest("ml", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("gcp.ml.1"), Size: ec.String("1g"), SizeResource: ec.String("memory"), @@ -1244,7 +1230,7 @@ func Test_readDeployment(t *testing.T) { MinSize: ec.String("1g"), MinSizeResource: ec.String("memory"), }, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -1293,8 +1279,7 @@ func Test_readDeployment(t *testing.T) { HttpsEndpoint: ec.String("https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "coordinating", + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("gcp.coordinating.1"), Size: ec.String("0g"), SizeResource: ec.String("memory"), @@ -1304,9 +1289,8 @@ func Test_readDeployment(t *testing.T) { NodeTypeMl: ec.String("false"), ZoneCount: 2, Autoscaling: 
&elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "hot_content", + }), + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("gcp.data.highio.1"), Size: ec.String("4g"), SizeResource: ec.String("memory"), @@ -1316,10 +1300,8 @@ func Test_readDeployment(t *testing.T) { NodeTypeMl: ec.String("false"), ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - - Id: "warm", + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("gcp.data.highstorage.1"), Size: ec.String("4g"), SizeResource: ec.String("memory"), @@ -1329,7 +1311,7 @@ func Test_readDeployment(t *testing.T) { NodeTypeMl: ec.String("false"), ZoneCount: 2, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -1378,17 +1360,15 @@ func Test_readDeployment(t *testing.T) { HttpsEndpoint: ec.String("https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243"), Config: &elasticsearchv2.ElasticsearchConfig{}, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "coordinating", + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("gcp.coordinating.1"), Size: ec.String("0g"), SizeResource: ec.String("memory"), ZoneCount: 2, NodeRoles: []string{"ingest", "remote_cluster_client"}, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "hot_content", + }), + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("gcp.data.highio.1"), Size: ec.String("4g"), SizeResource: ec.String("memory"), @@ -1402,27 +1382,24 @@ func Test_readDeployment(t *testing.T) { "data_content", }, Autoscaling: 
&elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "master", + }), + "master": *elasticsearchv2.CreateTierForTest("master", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("gcp.master.1"), Size: ec.String("0g"), SizeResource: ec.String("memory"), ZoneCount: 3, NodeRoles: []string{"master"}, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "ml", + }), + "ml": *elasticsearchv2.CreateTierForTest("ml", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("gcp.ml.1"), Size: ec.String("0g"), SizeResource: ec.String("memory"), ZoneCount: 1, NodeRoles: []string{"ml", "remote_cluster_client"}, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "warm", + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("gcp.data.highstorage.1"), Size: ec.String("4g"), SizeResource: ec.String("memory"), @@ -1432,7 +1409,7 @@ func Test_readDeployment(t *testing.T) { "remote_cluster_client", }, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -1507,8 +1484,7 @@ func Test_readDeployment(t *testing.T) { }, }, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("aws.ccs.r5d"), Size: ec.String("1g"), SizeResource: ec.String("memory"), @@ -1518,7 +1494,7 @@ func Test_readDeployment(t *testing.T) { NodeTypeMaster: ec.String("true"), NodeTypeMl: ec.String("false"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go index c36a98622..9d0e67053 
100644 --- a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go @@ -47,7 +47,7 @@ func Test_updateResourceToModel(t *testing.T) { defaultElasticsearch := &elasticsearchv2.Elasticsearch{ Topology: elasticsearchv2.ElasticsearchTopologies{ - *defaultHotTier, + "hot_content": *defaultHotTier, }, } @@ -99,8 +99,7 @@ func Test_updateResourceToModel(t *testing.T) { UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), }, Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("2g"), NodeTypeData: ec.String("true"), @@ -109,7 +108,7 @@ func Test_updateResourceToModel(t *testing.T) { NodeTypeMl: ec.String("false"), ZoneCount: 1, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -466,11 +465,10 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("4g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -744,7 +742,7 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - *defaultHotTier, + "hot_content": *defaultHotTier, }, }, Kibana: &kibanav2.Kibana{ @@ -770,8 +768,7 @@ func Test_updateResourceToModel(t *testing.T) { UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), }, Topology: 
elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("2g"), NodeTypeData: ec.String("true"), @@ -780,7 +777,7 @@ func Test_updateResourceToModel(t *testing.T) { NodeTypeMl: ec.String("false"), ZoneCount: 1, Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -906,8 +903,10 @@ func Test_updateResourceToModel(t *testing.T) { Region: "us-east-1", Version: "7.9.2", Elasticsearch: &elasticsearchv2.Elasticsearch{ - RefId: ec.String("main-elasticsearch"), - Topology: elasticsearchv2.ElasticsearchTopologies{*defaultHotTier}, + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *defaultHotTier, + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -923,16 +922,14 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "coordinating", + }), + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -1029,8 +1026,10 @@ func Test_updateResourceToModel(t *testing.T) { Region: "us-east-1", Version: "7.9.2", Elasticsearch: &elasticsearchv2.Elasticsearch{ - RefId: ec.String("main-elasticsearch"), - Topology: elasticsearchv2.ElasticsearchTopologies{*defaultHotTier}, + RefId: 
ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *defaultHotTier, + }, }, Kibana: &kibanav2.Kibana{ ElasticsearchClusterRefId: ec.String("main-elasticsearch"), @@ -1054,16 +1053,14 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "coordinating", + }), + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ @@ -1209,15 +1206,14 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("16g"), NodeTypeData: ec.String("true"), NodeTypeIngest: ec.String("true"), NodeTypeMaster: ec.String("true"), NodeTypeMl: ec.String("false"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -1230,15 +1226,14 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("16g"), NodeTypeData: ec.String("true"), NodeTypeIngest: ec.String("true"), NodeTypeMaster: ec.String("true"), NodeTypeMl: 
ec.String("false"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -1312,14 +1307,13 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("16g"), NodeTypeData: ec.String("true"), NodeTypeIngest: ec.String("true"), NodeTypeMaster: ec.String("true"), NodeTypeMl: ec.String("false"), - }, + }), }, }, }, @@ -1332,14 +1326,13 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("16g"), NodeTypeData: ec.String("true"), NodeTypeIngest: ec.String("true"), NodeTypeMaster: ec.String("true"), NodeTypeMl: ec.String("false"), - }, + }), }, }, }, @@ -1415,15 +1408,14 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("32g"), NodeTypeData: ec.String("true"), NodeTypeIngest: ec.String("true"), NodeTypeMaster: ec.String("true"), NodeTypeMl: ec.String("false"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -1436,15 +1428,14 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": 
*elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("16g"), NodeTypeData: ec.String("true"), NodeTypeIngest: ec.String("true"), NodeTypeMaster: ec.String("true"), NodeTypeMl: ec.String("false"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -1520,20 +1511,19 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("16g"), NodeTypeData: ec.String("true"), NodeTypeIngest: ec.String("true"), NodeTypeMaster: ec.String("true"), NodeTypeMl: ec.String("false"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "warm", + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -1546,15 +1536,14 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("16g"), NodeTypeData: ec.String("true"), NodeTypeIngest: ec.String("true"), NodeTypeMaster: ec.String("true"), NodeTypeMl: ec.String("false"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -1659,16 +1648,14 @@ func Test_updateResourceToModel(t *testing.T) { RefId: ec.String("main-elasticsearch"), Autoscale: ec.Bool(true), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", 
elasticsearchv2.ElasticsearchTopology{ Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "warm", + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -1682,11 +1669,10 @@ func Test_updateResourceToModel(t *testing.T) { RefId: ec.String("main-elasticsearch"), Autoscale: ec.Bool(true), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -1790,11 +1776,10 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Tags: map[string]string{ @@ -1812,11 +1797,10 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -1894,11 +1878,10 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": 
*elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, SnapshotSource: &elasticsearchv2.ElasticsearchSnapshotSource{ SourceElasticsearchClusterId: "8c63b87af9e24ea49b8a4bfe550e5fe9", @@ -1914,11 +1897,10 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -2001,11 +1983,10 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("8g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, }, @@ -2092,16 +2073,14 @@ func Test_updateResourceToModel(t *testing.T) { Elasticsearch: &elasticsearchv2.Elasticsearch{ RefId: ec.String("main-elasticsearch"), Topology: elasticsearchv2.ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, - { - Id: "coordinating", + }), + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ Size: ec.String("16g"), Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, - }, + }), }, }, Kibana: &kibanav2.Kibana{ diff --git a/ec/ecresource/deploymentresource/deployment_test.go 
b/ec/ecresource/deploymentresource/deployment_test.go index 64e141b90..d4c2804a4 100644 --- a/ec/ecresource/deploymentresource/deployment_test.go +++ b/ec/ecresource/deploymentresource/deployment_test.go @@ -49,10 +49,11 @@ func Test_createDeploymentWithEmptyFields(t *testing.T) { version = "8.4.3" elasticsearch = { - config = {} - hot = { - size = "8g" - autoscaling = {} + topology = { + "hot_content" = { + size = "8g" + autoscaling = {} + } } } }`, diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go index 47dd91abe..c9b263e40 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go @@ -135,8 +135,8 @@ func (es *ElasticsearchTF) topologiesPayload(ctx context.Context, topologyModels return diags } - for _, tier := range tiers { - diags.Append(tier.payload(ctx, tier.Id.Value, topologyModels)...) + for id, tier := range tiers { + diags.Append(tier.payload(ctx, id, topologyModels)...) 
} return diags diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go index fc81fec36..e31e8c215 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go @@ -112,8 +112,7 @@ func Test_writeElasticsearch(t *testing.T) { ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { Size: ec.String("2g"), ZoneCount: 1, }, @@ -180,8 +179,7 @@ func Test_writeElasticsearch(t *testing.T) { ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { Size: ec.String("2g"), ZoneCount: 1, }, @@ -251,8 +249,7 @@ func Test_writeElasticsearch(t *testing.T) { ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { Size: ec.String("2g"), ZoneCount: 1, NodeRoles: []string{"a", "b", "c"}, @@ -379,13 +376,11 @@ func Test_writeElasticsearch(t *testing.T) { ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { Size: ec.String("2g"), ZoneCount: 1, }, - { - Id: "warm", + "warm": { Size: ec.String("2g"), ZoneCount: 1, }, @@ -486,13 +481,11 @@ func Test_writeElasticsearch(t *testing.T) { UserSettingsYaml: ec.String("somesetting: true"), }, Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { Size: ec.String("2g"), ZoneCount: 1, }, - { - Id: "warm", + "warm": { Size: ec.String("2g"), ZoneCount: 1, }, @@ -683,15 +676,13 @@ func Test_writeElasticsearch(t *testing.T) { ResourceId: ec.String(mock.ValidClusterID), Region: 
ec.String("some-region"), Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { NodeTypeData: ec.String("false"), NodeTypeMaster: ec.String("false"), NodeTypeIngest: ec.String("false"), NodeTypeMl: ec.String("true"), }, - { - Id: "warm", + "warm": { NodeTypeMaster: ec.String("true"), }, }, @@ -789,19 +780,16 @@ func Test_writeElasticsearch(t *testing.T) { ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { NodeTypeData: ec.String("false"), NodeTypeMaster: ec.String("false"), NodeTypeIngest: ec.String("false"), NodeTypeMl: ec.String("true"), }, - { - Id: "warm", + "warm": { NodeTypeMaster: ec.String("true"), }, - { - Id: "cold", + "cold": { Size: ec.String("2g"), }, }, @@ -929,16 +917,9 @@ func Test_writeElasticsearch(t *testing.T) { ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), Topology: ElasticsearchTopologies{ - { - Id: "hot_content", - }, - { - Id: "warm", - }, - { - Id: "cold", - Size: ec.String("2g"), - }, + "hot_content": {}, + "warm": {}, + "cold": {Size: ec.String("2g")}, }, }, template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), @@ -1064,27 +1045,23 @@ func Test_writeElasticsearch(t *testing.T) { ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { Autoscaling: &ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("58g"), }, }, - { - Id: "warm", + "warm": { Autoscaling: &ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("29g"), }, }, - { - Id: "cold", + "cold": { Size: ec.String("2g"), Autoscaling: &ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("29g"), }, }, - { - Id: "ml", + "ml": { Size: ec.String("1g"), Autoscaling: &ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("29g"), @@ -1243,15 +1220,13 @@ func Test_writeElasticsearch(t 
*testing.T) { ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { Autoscaling: &ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("450g"), MinSize: ec.String("2g"), }, }, - { - Id: "master", + "master": { Autoscaling: &ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("250g"), MinSize: ec.String("1g"), @@ -1357,22 +1332,19 @@ func Test_writeElasticsearch(t *testing.T) { ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { Autoscaling: &ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("450g"), MaxSizeResource: ec.String("storage"), }, }, - { - Id: "warm", + "warm": { Autoscaling: &ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("870g"), MaxSizeResource: ec.String("storage"), }, }, - { - Id: "cold", + "cold": { Size: ec.String("4g"), Autoscaling: &ElasticsearchTopologyAutoscaling{ MaxSize: ec.String("1740g"), @@ -1516,8 +1488,7 @@ func Test_writeElasticsearch(t *testing.T) { Plugins: []string{"plugin"}, }, Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { Size: ec.String("2g"), ZoneCount: 1, }, @@ -1595,8 +1566,7 @@ func Test_writeElasticsearch(t *testing.T) { SourceElasticsearchClusterId: mock.ValidClusterID, }, Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { Size: ec.String("2g"), ZoneCount: 1, }, @@ -1667,8 +1637,7 @@ func Test_writeElasticsearch(t *testing.T) { ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { Size: ec.String("2g"), ZoneCount: 1, }, @@ -1741,8 +1710,7 @@ func Test_writeElasticsearch(t *testing.T) { ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { Size: 
ec.String("2g"), ZoneCount: 1, }, @@ -1815,8 +1783,7 @@ func Test_writeElasticsearch(t *testing.T) { ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { Size: ec.String("2g"), ZoneCount: 1, }, @@ -1889,8 +1856,7 @@ func Test_writeElasticsearch(t *testing.T) { ResourceId: ec.String(mock.ValidClusterID), Region: ec.String("some-region"), Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { Size: ec.String("2g"), ZoneCount: 1, }, diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go index 81fdbe79c..d300a9c3e 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go @@ -158,8 +158,8 @@ func Test_readElasticsearch(t *testing.T) { HttpsEndpoint: ec.String("https://somecluster.cloud.elastic.co:9243"), Config: &ElasticsearchConfig{}, Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { + id: "hot_content", InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("2g"), SizeResource: ec.String("memory"), @@ -239,8 +239,8 @@ func Test_readElasticsearch(t *testing.T) { UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), }, Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { + id: "hot_content", InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("2g"), SizeResource: ec.String("memory"), @@ -306,16 +306,16 @@ func Test_readElasticsearchTopology(t *testing.T) { }, }}, want: ElasticsearchTopologies{ - { - Id: "coordinating", + "coordinating": { + id: "coordinating", InstanceConfigurationId: ec.String("aws.coordinating.m5"), Size: ec.String("0g"), SizeResource: ec.String("memory"), ZoneCount: 2, Autoscaling: 
&ElasticsearchTopologyAutoscaling{}, }, - { - Id: "hot_content", + "hot_content": { + id: "hot_content", InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("4g"), SizeResource: ec.String("memory"), @@ -362,8 +362,8 @@ func Test_readElasticsearchTopology(t *testing.T) { }, }}, want: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { + id: "hot_content", InstanceConfigurationId: ec.String("aws.data.highio.i3"), Size: ec.String("4g"), SizeResource: ec.String("memory"), @@ -373,8 +373,8 @@ func Test_readElasticsearchTopology(t *testing.T) { NodeTypeMaster: ec.String("true"), Autoscaling: &ElasticsearchTopologyAutoscaling{}, }, - { - Id: "ml", + "ml": { + id: "ml", InstanceConfigurationId: ec.String("aws.ml.m5"), Size: ec.String("0g"), SizeResource: ec.String("memory"), diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_test_utils.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_test_utils.go index 070fb420c..c007c4823 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_test_utils.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_test_utils.go @@ -21,7 +21,7 @@ import "github.com/elastic/cloud-sdk-go/pkg/models" func CreateTierForTest(tierId string, tier ElasticsearchTopology) *ElasticsearchTopology { res := tier - res.Id = tierId + res.id = tierId return &res } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go index b7f06b363..29c7abd6b 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go @@ -38,7 +38,6 @@ import ( ) type ElasticsearchTopologyTF struct { - Id types.String `tfsdk:"id"` InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` Size types.String `tfsdk:"size"` SizeResource 
types.String `tfsdk:"size_resource"` @@ -52,7 +51,7 @@ type ElasticsearchTopologyTF struct { } type ElasticsearchTopology struct { - Id string `tfsdk:"id"` + id string InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` Size *string `tfsdk:"size"` SizeResource *string `tfsdk:"size_resource"` @@ -123,8 +122,8 @@ func readElasticsearchTopologies(in *models.ElasticsearchClusterPlan) (Elasticse if err != nil { return nil, err } - if tier.Id != "" { - topology[tier.Id] = *tier + if tier.id != "" { + topology[tier.id] = *tier } } @@ -134,7 +133,7 @@ func readElasticsearchTopologies(in *models.ElasticsearchClusterPlan) (Elasticse func readElasticsearchTopology(model *models.ElasticsearchClusterTopologyElement) (*ElasticsearchTopology, error) { var topology ElasticsearchTopology - topology.Id = model.ID + topology.id = model.ID if model.InstanceConfigurationID != "" { topology.InstanceConfigurationId = &model.InstanceConfigurationID diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go index 615a51400..fbfd7efc8 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go @@ -130,8 +130,7 @@ func Test_UseNodeRoles(t *testing.T) { planVersion: "7.12.0", elasticsearch: Elasticsearch{ Topology: ElasticsearchTopologies{ - { - Id: "hot_content", + "hot_content": { NodeTypeData: ec.String("true"), NodeTypeMaster: ec.String("true"), NodeTypeIngest: ec.String("true"), diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go index fbf881974..c634459b1 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -223,16 +223,14 @@ func ElasticsearchRemoteClusterSchema() tfsdk.Attribute { "deployment_id": { Description: 
"Remote deployment ID", Type: types.StringType, - // TODO fix examples/deployment_css/deployment.tf#61 - // Validators: []tfsdk.AttributeValidator{validators.Length(32, 32)}, - Required: true, + Validators: []tfsdk.AttributeValidator{stringvalidator.LengthBetween(32, 32)}, + Required: true, }, "alias": { Description: "Alias for this Cross Cluster Search binding", Type: types.StringType, - // TODO fix examples/deployment_css/deployment.tf#62 - // Validators: []tfsdk.AttributeValidator{validators.NotEmpty()}, - Required: true, + Validators: []tfsdk.AttributeValidator{stringvalidator.NoneOf("")}, + Required: true, }, "ref_id": { Description: `Remote elasticsearch "ref_id", it is best left to the default value`, @@ -376,11 +374,6 @@ func elasticsearchTopologySchema() tfsdk.Attribute { // Computed: true, Description: `Elasticsearch topology`, Attributes: tfsdk.MapNestedAttributes(map[string]tfsdk.Attribute{ - "id": { - Type: types.StringType, - Description: `Required topology ID from the deployment template`, - Required: true, - }, "instance_configuration_id": { Type: types.StringType, Description: `Computed Instance Configuration ID of the topology element`, From 189d2d961fd3a1acd917d39735ef8f8debe8acf2 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 25 Jan 2023 15:26:38 +0100 Subject: [PATCH 101/104] fix acceptance tests and examples --- ec/acc/datasource_deployment_basic_test.go | 10 +-- ec/acc/deployment_autoscaling_test.go | 62 +++++++------- ec/acc/deployment_basic_defaults_test.go | 60 +++++++------- ec/acc/deployment_basic_tags_test.go | 40 +++++----- ec/acc/deployment_basic_test.go | 4 +- ec/acc/deployment_ccs_test.go | 54 ++++++------- ec/acc/deployment_compute_optimized_test.go | 20 ++--- ec/acc/deployment_dedicated_test.go | 80 +++++++++---------- ec/acc/deployment_enterprise_search_test.go | 20 ++--- ec/acc/deployment_hotwarm_test.go | 40 +++++----- ec/acc/deployment_memory_optimized_test.go | 20 ++--- 
ec/acc/deployment_observability_tpl_test.go | 18 ++--- .../deployment_post_node_role_upgrade_test.go | 20 ++--- ...deployment_pre_node_role_migration_test.go | 56 ++++++------- ec/acc/deployment_security_test.go | 20 ++--- .../testdata/datasource_deployment_basic.tf | 22 ++--- ec/acc/testdata/datasource_stack_latest.tf | 2 +- ec/acc/testdata/datasource_stack_regex.tf | 2 +- ec/acc/testdata/datasource_tags.tf | 12 +-- ec/acc/testdata/deployment_autoscaling_1.tf | 58 +++++++------- ec/acc/testdata/deployment_autoscaling_2.tf | 58 +++++++------- ec/acc/testdata/deployment_basic.tf | 10 ++- .../testdata/deployment_basic_defaults_1.tf | 9 ++- .../testdata/deployment_basic_defaults_2.tf | 8 +- .../testdata/deployment_basic_defaults_3.tf | 10 ++- .../deployment_basic_defaults_hw_1.tf | 10 ++- .../deployment_basic_defaults_hw_2.tf | 12 +-- .../deployment_basic_integrations_server_1.tf | 6 +- .../deployment_basic_integrations_server_2.tf | 6 +- .../deployment_basic_settings_config_1.tf | 10 ++- .../deployment_basic_settings_config_2.tf | 10 ++- ...deployment_basic_settings_config_import.tf | 60 +++++++------- ec/acc/testdata/deployment_basic_tags_1.tf | 10 ++- ec/acc/testdata/deployment_basic_tags_2.tf | 10 ++- ec/acc/testdata/deployment_basic_tags_3.tf | 10 ++- ec/acc/testdata/deployment_basic_tags_4.tf | 10 ++- .../deployment_basic_with_traffic_filter_2.tf | 8 +- .../deployment_basic_with_traffic_filter_3.tf | 8 +- ec/acc/testdata/deployment_ccs_1.tf | 16 ++-- ec/acc/testdata/deployment_ccs_2.tf | 10 ++- .../deployment_compute_optimized_1.tf | 8 +- .../deployment_compute_optimized_2.tf | 10 ++- .../deployment_dedicated_coordinating.tf | 32 ++++---- .../testdata/deployment_dedicated_master.tf | 42 +++++----- .../deployment_docker_image_override.tf | 10 ++- .../deployment_elasticsearch_keystore_1.tf | 10 ++- ...yment_elasticsearch_keystore_1_migrated.tf | 10 ++- .../deployment_elasticsearch_keystore_2.tf | 10 ++- .../deployment_elasticsearch_keystore_3.tf | 10 ++- 
.../deployment_elasticsearch_keystore_4.tf | 10 ++- ...ployment_elasticsearch_keystore_creds.json | 2 +- ec/acc/testdata/deployment_emptyconfig.tf | 12 +-- .../deployment_enterprise_search_1.tf | 8 +- .../deployment_enterprise_search_2.tf | 10 ++- ec/acc/testdata/deployment_hotwarm_1.tf | 14 ++-- ec/acc/testdata/deployment_hotwarm_2.tf | 23 +++--- .../testdata/deployment_memory_optimized_1.tf | 8 +- .../testdata/deployment_memory_optimized_2.tf | 10 ++- ec/acc/testdata/deployment_observability_1.tf | 22 ++--- ec/acc/testdata/deployment_observability_2.tf | 22 ++--- ec/acc/testdata/deployment_observability_3.tf | 22 ++--- ec/acc/testdata/deployment_observability_4.tf | 22 ++--- .../testdata/deployment_observability_self.tf | 12 +-- .../deployment_observability_tpl_1.tf | 8 +- .../deployment_observability_tpl_2.tf | 10 ++- .../deployment_post_node_roles_upgrade_1.tf | 12 +-- .../deployment_post_node_roles_upgrade_2.tf | 12 +-- .../deployment_pre_node_roles_migration_1.tf | 12 +-- .../deployment_pre_node_roles_migration_2.tf | 12 +-- .../deployment_pre_node_roles_migration_3.tf | 23 +++--- ec/acc/testdata/deployment_security_1.tf | 8 +- ec/acc/testdata/deployment_security_2.tf | 10 ++- ec/acc/testdata/deployment_snapshot_1.tf | 8 +- ec/acc/testdata/deployment_snapshot_2.tf | 16 ++-- ...oyment_traffic_filter_association_basic.tf | 8 +- ...filter_association_basic_ignore_changes.tf | 8 +- ...traffic_filter_association_basic_update.tf | 8 +- ec/acc/testdata/deployment_upgrade_retry_1.tf | 10 ++- ec/acc/testdata/deployment_upgrade_retry_2.tf | 10 ++- .../deployment_with_extension_bundle_file.tf | 6 +- examples/deployment/deployment.tf | 6 +- examples/deployment_ccs/deployment.tf | 29 ++++--- .../elastic_deployment.tf | 7 +- examples/deployment_with_init/deployment.tf | 9 ++- 84 files changed, 812 insertions(+), 670 deletions(-) diff --git a/ec/acc/datasource_deployment_basic_test.go b/ec/acc/datasource_deployment_basic_test.go index d0944952f..e33ae87eb 100644 --- 
a/ec/acc/datasource_deployment_basic_test.go +++ b/ec/acc/datasource_deployment_basic_test.go @@ -57,11 +57,11 @@ func TestAccDatasourceDeployment_basic(t *testing.T) { resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.resource_id", resourceName, "elasticsearch.resource_id"), resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.http_endpoint_id", resourceName, "elasticsearch.http_endpoint_id"), resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.https_endpoint_id", resourceName, "elasticsearch.https_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.instance_configuration_id", resourceName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.size", resourceName, "elasticsearch.hot.size"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.size_resource", resourceName, "elasticsearch.hot.size_resource"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.zone_count", resourceName, "elasticsearch.hot.zone_count"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.node_roles.*", resourceName, "elasticsearch.hot.node_roles.*"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.instance_configuration_id", resourceName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.size", resourceName, "elasticsearch.topology.hot_content.size"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.size_resource", resourceName, "elasticsearch.topology.hot_content.size_resource"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.zone_count", resourceName, "elasticsearch.topology.hot_content.zone_count"), + 
resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.node_roles.*", resourceName, "elasticsearch.topology.hot_content.node_roles.*"), // Kibana resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.elasticsearch_cluster_ref_id", resourceName, "kibana.elasticsearch_cluster_ref_id"), diff --git a/ec/acc/deployment_autoscaling_test.go b/ec/acc/deployment_autoscaling_test.go index d4c0246d9..3ae42c578 100644 --- a/ec/acc/deployment_autoscaling_test.go +++ b/ec/acc/deployment_autoscaling_test.go @@ -46,32 +46,32 @@ func TestAccDeployment_autoscaling(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resName, "elasticsearch.autoscale", "true"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.cold.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.cold.size", "0g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.cold.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.cold.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.cold.autoscaling.max_size", "58g"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.cold.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.cold.size", "0g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.cold.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.cold.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.cold.autoscaling.max_size", "58g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.frozen.size", "0g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.frozen.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.frozen.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.frozen.autoscaling.max_size", "120g"), + 
resource.TestCheckResourceAttr(resName, "elasticsearch.topology.frozen.size", "0g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.frozen.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.frozen.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.frozen.autoscaling.max_size", "120g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.autoscaling.max_size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.autoscaling.max_size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.ml.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.ml.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.ml.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.ml.autoscaling.max_size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.ml.autoscaling.min_size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.ml.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.ml.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.ml.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.ml.autoscaling.max_size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.ml.autoscaling.min_size", 
"1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.autoscaling.max_size", "15g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.autoscaling.max_size", "15g"), resource.TestCheckNoResourceAttr(resName, "kibana"), resource.TestCheckNoResourceAttr(resName, "apm"), @@ -83,17 +83,17 @@ func TestAccDeployment_autoscaling(t *testing.T) { Config: cfgF(disableAutoscale), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resName, "elasticsearch.autoscale", "false"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.autoscaling.max_size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.autoscaling.max_size", "8g"), - 
resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.autoscaling.max_size", "15g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.autoscaling.max_size", "15g"), resource.TestCheckNoResourceAttr(resName, "kibana"), resource.TestCheckNoResourceAttr(resName, "apm"), diff --git a/ec/acc/deployment_basic_defaults_test.go b/ec/acc/deployment_basic_defaults_test.go index 0b2f724b6..c93f27666 100644 --- a/ec/acc/deployment_basic_defaults_test.go +++ b/ec/acc/deployment_basic_defaults_test.go @@ -51,11 +51,11 @@ func TestAccDeployment_basic_defaults_first(t *testing.T) { // Checks the defaults which are populated using a mix of // Deployment Template and schema defaults. 
Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), @@ -72,11 +72,11 @@ func TestAccDeployment_basic_defaults_first(t *testing.T) { Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( // changed - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + 
resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckResourceAttr(resName, "kibana.size", "2g"), resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), @@ -95,11 +95,11 @@ func TestAccDeployment_basic_defaults_first(t *testing.T) { // Remove all resources except Elasticsearch and Kibana and set a node type override Config: thirdConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), @@ -134,11 +134,11 @@ func TestAccDeployment_basic_defaults_hw(t *testing.T) { Config: cfg, // Create a deployment which only uses Elasticsearch resources Check: 
resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckNoResourceAttr(resName, "kibana"), resource.TestCheckNoResourceAttr(resName, "apm"), resource.TestCheckNoResourceAttr(resName, "enterprise_search"), @@ -149,17 +149,17 @@ func TestAccDeployment_basic_defaults_hw(t *testing.T) { // hot warm, use defaults. Config: hotWarmCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.instance_configuration_id"), // Hot Warm defaults to 4g. 
- resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.zone_count", "2"), resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), diff --git a/ec/acc/deployment_basic_tags_test.go b/ec/acc/deployment_basic_tags_test.go index 43dfae2bf..b431de454 100644 --- a/ec/acc/deployment_basic_tags_test.go +++ b/ec/acc/deployment_basic_tags_test.go @@ -50,11 +50,11 @@ func TestAccDeployment_basic_tags(t *testing.T) { // Create a deployment with tags. 
Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckNoResourceAttr(resName, "kibana"), resource.TestCheckNoResourceAttr(resName, "apm"), resource.TestCheckNoResourceAttr(resName, "enterprise_search"), @@ -69,11 +69,11 @@ func TestAccDeployment_basic_tags(t *testing.T) { // Remove a tag. 
Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckNoResourceAttr(resName, "kibana"), resource.TestCheckNoResourceAttr(resName, "apm"), resource.TestCheckNoResourceAttr(resName, "enterprise_search"), @@ -87,11 +87,11 @@ func TestAccDeployment_basic_tags(t *testing.T) { // Remove the tags block. 
Config: thirdConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckNoResourceAttr(resName, "kibana"), resource.TestCheckNoResourceAttr(resName, "apm"), resource.TestCheckNoResourceAttr(resName, "enterprise_search"), @@ -104,11 +104,11 @@ func TestAccDeployment_basic_tags(t *testing.T) { // Add the tags block with a single tag. 
Config: fourthConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckNoResourceAttr(resName, "kibana"), resource.TestCheckNoResourceAttr(resName, "apm"), resource.TestCheckNoResourceAttr(resName, "enterprise_search"), diff --git a/ec/acc/deployment_basic_test.go b/ec/acc/deployment_basic_test.go index d39415191..9115e25e9 100644 --- a/ec/acc/deployment_basic_test.go +++ b/ec/acc/deployment_basic_test.go @@ -196,8 +196,8 @@ func checkBasicDeploymentResource(resName, randomDeploymentName, deploymentVersi resource.TestCheckResourceAttrSet(resName, "apm.http_endpoint"), resource.TestCheckResourceAttrSet(resName, "apm.https_endpoint"), resource.TestCheckResourceAttr(resName, "elasticsearch.region", getRegion()), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, 
"elasticsearch.topology.hot_content.size_resource", "memory"), resource.TestCheckResourceAttrSet(resName, "elasticsearch.http_endpoint"), resource.TestCheckResourceAttrSet(resName, "elasticsearch.https_endpoint"), resource.TestCheckResourceAttr(resName, "kibana.region", getRegion()), diff --git a/ec/acc/deployment_ccs_test.go b/ec/acc/deployment_ccs_test.go index d7f41418f..e51b82e8c 100644 --- a/ec/acc/deployment_ccs_test.go +++ b/ec/acc/deployment_ccs_test.go @@ -53,10 +53,10 @@ func TestAccDeployment_ccs(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( // CCS Checks - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.hot.instance_configuration_id"), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.topology.hot_content.instance_configuration_id"), // CCS defaults to 1g. - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.hot.size", "1g"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.topology.hot_content.size_resource", "memory"), // Remote cluster settings resource.TestCheckResourceAttr(ccsResName, "elasticsearch.remote_cluster.#", "3"), @@ -67,26 +67,26 @@ func TestAccDeployment_ccs(t *testing.T) { resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.remote_cluster.2.deployment_id"), resource.TestCheckResourceAttr(ccsResName, "elasticsearch.remote_cluster.2.alias", fmt.Sprint(sourceRandomName, "-2")), - resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_data"), - resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_ingest"), - resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_master"), - resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_ml"), - resource.TestCheckResourceAttrSet(ccsResName, 
"elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.hot.zone_count", "1"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_data"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_ingest"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_master"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_ml"), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.topology.hot_content.zone_count", "1"), resource.TestCheckNoResourceAttr(sourceResName, "kibana"), resource.TestCheckNoResourceAttr(sourceResName, "apm"), resource.TestCheckNoResourceAttr(sourceResName, "enterprise_search"), // Source Checks - resource.TestCheckResourceAttrSet(sourceResName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.hot.size", "1g"), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_data"), - resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_ingest"), - resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_master"), - resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_ml"), - resource.TestCheckResourceAttrSet(sourceResName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.hot.zone_count", "1"), + resource.TestCheckResourceAttrSet(sourceResName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(sourceResName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(sourceResName, 
"elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_data"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_ingest"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_master"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_ml"), + resource.TestCheckResourceAttrSet(sourceResName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(sourceResName, "elasticsearch.topology.hot_content.zone_count", "1"), resource.TestCheckNoResourceAttr(sourceResName, "kibana"), resource.TestCheckNoResourceAttr(sourceResName, "apm"), resource.TestCheckNoResourceAttr(sourceResName, "enterprise_search"), @@ -97,19 +97,19 @@ func TestAccDeployment_ccs(t *testing.T) { Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( // Changes. 
- resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.hot.size", "2g"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.hot.size_resource", "memory"), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.topology.hot_content.size_resource", "memory"), resource.TestCheckResourceAttr(ccsResName, "elasticsearch.remote_cluster.#", "0"), - resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_data"), - resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_ingest"), - resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_master"), - resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.hot.node_type_ml"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_data"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_ingest"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_master"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_ml"), - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.hot.zone_count", "1"), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.topology.hot_content.zone_count", "1"), resource.TestCheckResourceAttr(ccsResName, "kibana.zone_count", "1"), resource.TestCheckResourceAttrSet(ccsResName, "kibana.instance_configuration_id"), resource.TestCheckResourceAttr(ccsResName, "kibana.size", "1g"), diff 
--git a/ec/acc/deployment_compute_optimized_test.go b/ec/acc/deployment_compute_optimized_test.go index f130bf016..7c9feb952 100644 --- a/ec/acc/deployment_compute_optimized_test.go +++ b/ec/acc/deployment_compute_optimized_test.go @@ -41,11 +41,11 @@ func TestAccDeployment_computeOptimized(t *testing.T) { // Create a Compute Optimized deployment with the default settings. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), @@ -58,11 +58,11 @@ func TestAccDeployment_computeOptimized(t *testing.T) { // Change the Elasticsearch topology size and add APM instance. 
Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), diff --git a/ec/acc/deployment_dedicated_test.go b/ec/acc/deployment_dedicated_test.go index 73802fa5b..ff3e3af7a 100644 --- a/ec/acc/deployment_dedicated_test.go +++ b/ec/acc/deployment_dedicated_test.go @@ -40,22 +40,22 @@ func TestAccDeployment_dedicated_coordinating(t *testing.T) { Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.coordinating.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.coordinating.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.coordinating.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "elasticsearch.coordinating.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.coordinating.size_resource", 
"memory"), - - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - - resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.coordinating.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.coordinating.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.coordinating.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.coordinating.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.coordinating.size_resource", "memory"), + + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.zone_count", "1"), + resource.TestCheckResourceAttr(resName, 
"elasticsearch.topology.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), resource.TestCheckNoResourceAttr(resName, "kibana"), resource.TestCheckNoResourceAttr(resName, "apm"), @@ -81,30 +81,30 @@ func TestAccDeployment_dedicated_master(t *testing.T) { // Create a deployment with dedicated master nodes. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.cold.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.master.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.instance_configuration_id"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.cold.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.cold.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.cold.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.cold.zone_count", "1"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "3"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.master.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.master.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.master.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.master.zone_count", "3"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), - 
resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.cold.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.master.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.instance_configuration_id"), + + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.cold.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.cold.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.cold.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.cold.zone_count", "1"), + + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "3"), + + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.master.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.master.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.master.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.master.zone_count", "3"), + + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.node_roles.#"), + 
resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.zone_count", "2"), resource.TestCheckNoResourceAttr(resName, "kibana"), resource.TestCheckNoResourceAttr(resName, "apm"), diff --git a/ec/acc/deployment_enterprise_search_test.go b/ec/acc/deployment_enterprise_search_test.go index 709955118..17d9b1388 100644 --- a/ec/acc/deployment_enterprise_search_test.go +++ b/ec/acc/deployment_enterprise_search_test.go @@ -41,11 +41,11 @@ func TestAccDeployment_enterpriseSearch(t *testing.T) { // Create an Enterprise Search deployment with the default settings. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), @@ -61,11 +61,11 @@ func TestAccDeployment_enterpriseSearch(t *testing.T) { // Change the Elasticsearch topology size. 
Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), diff --git a/ec/acc/deployment_hotwarm_test.go b/ec/acc/deployment_hotwarm_test.go index 9a72248b8..8d4c1246b 100644 --- a/ec/acc/deployment_hotwarm_test.go +++ b/ec/acc/deployment_hotwarm_test.go @@ -46,18 +46,18 @@ func TestAccDeployment_hotwarm(t *testing.T) { // Create a Hot / Warm deployment with the default settings. 
Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.instance_configuration_id"), // Hot Warm defaults to 4g. - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.zone_count", "2"), resource.TestCheckNoResourceAttr(resName, "kibana"), resource.TestCheckNoResourceAttr(resName, "apm"), 
resource.TestCheckNoResourceAttr(resName, "enterprise_search"), @@ -68,17 +68,17 @@ func TestAccDeployment_hotwarm(t *testing.T) { Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( // Changes. - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.zone_count", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.node_roles.#"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttrSet(resName, 
"elasticsearch.topology.warm.node_roles.#"), resource.TestCheckNoResourceAttr(resName, "kibana"), resource.TestCheckNoResourceAttr(resName, "apm"), resource.TestCheckNoResourceAttr(resName, "enterprise_search"), diff --git a/ec/acc/deployment_memory_optimized_test.go b/ec/acc/deployment_memory_optimized_test.go index b527a9921..ac3c1a1e4 100644 --- a/ec/acc/deployment_memory_optimized_test.go +++ b/ec/acc/deployment_memory_optimized_test.go @@ -41,11 +41,11 @@ func TestAccDeployment_memoryOptimized(t *testing.T) { // Create a Memory Optimized deployment with the default settings. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), @@ -58,11 +58,11 @@ func TestAccDeployment_memoryOptimized(t *testing.T) { // Change the Elasticsearch topology size and add APM instance. 
Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), diff --git a/ec/acc/deployment_observability_tpl_test.go b/ec/acc/deployment_observability_tpl_test.go index 6acd74019..ddee3a6ac 100644 --- a/ec/acc/deployment_observability_tpl_test.go +++ b/ec/acc/deployment_observability_tpl_test.go @@ -41,11 +41,11 @@ func TestAccDeployment_observabilityTpl(t *testing.T) { // Create an Observability deployment with the default settings. 
Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), @@ -61,10 +61,10 @@ func TestAccDeployment_observabilityTpl(t *testing.T) { // Change the Elasticsearch topology size. 
Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), diff --git a/ec/acc/deployment_post_node_role_upgrade_test.go b/ec/acc/deployment_post_node_role_upgrade_test.go index a1baf55fa..358329165 100644 --- a/ec/acc/deployment_post_node_role_upgrade_test.go +++ b/ec/acc/deployment_post_node_role_upgrade_test.go @@ -44,11 +44,11 @@ func TestAccDeployment_post_node_roles(t *testing.T) { { Config: cfgF(startCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, 
"elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), resource.TestCheckNoResourceAttr(resName, "elastic.hot.node_type_data"), resource.TestCheckNoResourceAttr(resName, "elastic.hot.node_type_ingest"), @@ -63,11 +63,11 @@ func TestAccDeployment_post_node_roles(t *testing.T) { { Config: cfgF(upgradeVersionCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), resource.TestCheckNoResourceAttr(resName, "elastic.hot.node_type_data"), resource.TestCheckNoResourceAttr(resName, "elastic.hot.node_type_ingest"), diff --git a/ec/acc/deployment_pre_node_role_migration_test.go b/ec/acc/deployment_pre_node_role_migration_test.go index 594cef2c1..71882f5e7 100644 --- a/ec/acc/deployment_pre_node_role_migration_test.go +++ b/ec/acc/deployment_pre_node_role_migration_test.go @@ -45,15 +45,15 @@ 
func TestAccDeployment_pre_node_roles(t *testing.T) { { Config: cfgF(startCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_data", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_ingest", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_master", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_ml", "false"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_roles.#", "0"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_data", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_ingest", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_master", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_ml", "false"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_roles.#", "0"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), resource.TestCheckNoResourceAttr(resName, "kibana"), resource.TestCheckNoResourceAttr(resName, "apm"), @@ -63,15 +63,15 @@ func TestAccDeployment_pre_node_roles(t *testing.T) { { 
Config: cfgF(upgradeVersionCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_data", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_ingest", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_master", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_type_ml", "false"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.node_roles.#", "0"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_data", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_ingest", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_master", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_ml", "false"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_roles.#", "0"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), resource.TestCheckNoResourceAttr(resName, "kibana"), resource.TestCheckNoResourceAttr(resName, "apm"), @@ -82,11 +82,11 @@ func TestAccDeployment_pre_node_roles(t *testing.T) { Config: cfgF(addWarmTopologyCfg), Check: 
resource.ComposeAggregateTestCheckFunc( // Hot - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), resource.TestCheckNoResourceAttr(resName, "elastic.hot.node_type_data"), resource.TestCheckNoResourceAttr(resName, "elastic.hot.node_type_ingest"), @@ -94,11 +94,11 @@ func TestAccDeployment_pre_node_roles(t *testing.T) { resource.TestCheckNoResourceAttr(resName, "elastic.hot.node_type_ml"), // Warm - resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.warm.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.warm.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), + 
resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.zone_count", "1"), resource.TestCheckNoResourceAttr(resName, "elastic.warm.node_type_data"), resource.TestCheckNoResourceAttr(resName, "elastic.warm.node_type_ingest"), diff --git a/ec/acc/deployment_security_test.go b/ec/acc/deployment_security_test.go index b0ec77994..9fc27d324 100644 --- a/ec/acc/deployment_security_test.go +++ b/ec/acc/deployment_security_test.go @@ -41,11 +41,11 @@ func TestAccDeployment_security(t *testing.T) { // Create a Security deployment with the default settings. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), @@ -58,11 +58,11 @@ func TestAccDeployment_security(t *testing.T) { // Change the Elasticsearch topology size and add APM instance. 
Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.hot.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.hot.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), diff --git a/ec/acc/testdata/datasource_deployment_basic.tf b/ec/acc/testdata/datasource_deployment_basic.tf index 458c79725..59b324262 100644 --- a/ec/acc/testdata/datasource_deployment_basic.tf +++ b/ec/acc/testdata/datasource_deployment_basic.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "basic_observability" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } @@ -26,10 +28,12 @@ resource "ec_deployment" "basic_datasource" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } @@ -85,4 
+89,4 @@ data "ec_deployments" "query" { depends_on = [ ec_deployment.basic_datasource, ] -} \ No newline at end of file +} diff --git a/ec/acc/testdata/datasource_stack_latest.tf b/ec/acc/testdata/datasource_stack_latest.tf index 11e87ea80..de873669e 100644 --- a/ec/acc/testdata/datasource_stack_latest.tf +++ b/ec/acc/testdata/datasource_stack_latest.tf @@ -2,4 +2,4 @@ data "ec_stack" "latest" { version_regex = "latest" lock = true region = "%s" -} \ No newline at end of file +} diff --git a/ec/acc/testdata/datasource_stack_regex.tf b/ec/acc/testdata/datasource_stack_regex.tf index 126b6e9a0..3d15ae5ce 100644 --- a/ec/acc/testdata/datasource_stack_regex.tf +++ b/ec/acc/testdata/datasource_stack_regex.tf @@ -1,4 +1,4 @@ data "ec_stack" "regex" { version_regex = "8.4.?" region = "%s" -} \ No newline at end of file +} diff --git a/ec/acc/testdata/datasource_tags.tf b/ec/acc/testdata/datasource_tags.tf index 8b88f7077..a25c2769d 100644 --- a/ec/acc/testdata/datasource_tags.tf +++ b/ec/acc/testdata/datasource_tags.tf @@ -15,10 +15,12 @@ resource "ec_deployment" "tags" { } elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } @@ -31,4 +33,4 @@ data "ec_deployments" "tagfilter" { tags = { "test_id" = "%s" } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_autoscaling_1.tf b/ec/acc/testdata/deployment_autoscaling_1.tf index 19c474d65..6461d7d53 100644 --- a/ec/acc/testdata/deployment_autoscaling_1.tf +++ b/ec/acc/testdata/deployment_autoscaling_1.tf @@ -12,40 +12,42 @@ resource "ec_deployment" "autoscaling" { elasticsearch = { autoscale = "true" - cold = { - size = "0g" - zone_count = 1 - autoscaling = {} - } + topology = { + "cold" = { + size = "0g" + zone_count = 1 + autoscaling = {} + } - frozen = { - size = "0g" - zone_count = 1 - autoscaling = {} - } + "frozen" = { + size = "0g" + zone_count = 1 + autoscaling = {} + } - hot = 
{ - size = "1g" - zone_count = 1 - autoscaling = { - max_size = "8g" + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = { + max_size = "8g" + } } - } - ml = { - size = "1g" - zone_count = 1 - autoscaling = { - min_size = "1g" - max_size = "4g" + "ml" = { + size = "1g" + zone_count = 1 + autoscaling = { + min_size = "1g" + max_size = "4g" + } } - } - warm = { - size = "2g" - zone_count = 1 - autoscaling = { - max_size = "15g" + "warm" = { + size = "2g" + zone_count = 1 + autoscaling = { + max_size = "15g" + } } } } diff --git a/ec/acc/testdata/deployment_autoscaling_2.tf b/ec/acc/testdata/deployment_autoscaling_2.tf index c1a077522..718e4015d 100644 --- a/ec/acc/testdata/deployment_autoscaling_2.tf +++ b/ec/acc/testdata/deployment_autoscaling_2.tf @@ -12,40 +12,42 @@ resource "ec_deployment" "autoscaling" { elasticsearch = { autoscale = "false" - cold = { - size = "0g" - zone_count = 1 - autoscaling = {} - } + topology = { + "cold" = { + size = "0g" + zone_count = 1 + autoscaling = {} + } - frozen = { - size = "0g" - zone_count = 1 - autoscaling = {} - } + "frozen" = { + size = "0g" + zone_count = 1 + autoscaling = {} + } - hot = { - size = "1g" - zone_count = 1 - autoscaling = { - max_size = "8g" + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = { + max_size = "8g" + } } - } - ml = { - size = "0g" - zone_count = 1 - autoscaling = { - min_size = "0g" - max_size = "4g" + "ml" = { + size = "0g" + zone_count = 1 + autoscaling = { + min_size = "0g" + max_size = "4g" + } } - } - warm = { - size = "2g" - zone_count = 1 - autoscaling = { - max_size = "15g" + "warm" = { + size = "2g" + zone_count = 1 + autoscaling = { + max_size = "15g" + } } } } diff --git a/ec/acc/testdata/deployment_basic.tf b/ec/acc/testdata/deployment_basic.tf index 3bbe7becc..7920976fe 100644 --- a/ec/acc/testdata/deployment_basic.tf +++ b/ec/acc/testdata/deployment_basic.tf @@ -11,9 +11,11 @@ resource "ec_deployment" "basic" { deployment_template_id = "%s" 
elasticsearch = { - hot = { - size = "1g" - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } @@ -28,4 +30,4 @@ resource "ec_deployment" "basic" { enterprise_search = { instance_configuration_id = "%s" } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_defaults_1.tf b/ec/acc/testdata/deployment_basic_defaults_1.tf index e057ef35c..c8e451820 100644 --- a/ec/acc/testdata/deployment_basic_defaults_1.tf +++ b/ec/acc/testdata/deployment_basic_defaults_1.tf @@ -10,8 +10,11 @@ resource "ec_deployment" "defaults" { deployment_template_id = "%s" elasticsearch = { - hot = { - autoscaling = {} + topology = { + "hot_content" = { + autoscaling = {} + } + } } @@ -20,4 +23,4 @@ resource "ec_deployment" "defaults" { enterprise_search = { zone_count = 1 } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_defaults_2.tf b/ec/acc/testdata/deployment_basic_defaults_2.tf index 559c35662..8ff1ab50e 100644 --- a/ec/acc/testdata/deployment_basic_defaults_2.tf +++ b/ec/acc/testdata/deployment_basic_defaults_2.tf @@ -10,8 +10,10 @@ resource "ec_deployment" "defaults" { deployment_template_id = "%s" elasticsearch = { - hot = { - autoscaling = {} + topology = { + "hot_content" = { + autoscaling = {} + } } } @@ -26,4 +28,4 @@ resource "ec_deployment" "defaults" { enterprise_search = { zone_count = 1 } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_defaults_3.tf b/ec/acc/testdata/deployment_basic_defaults_3.tf index 54a0ed51a..bcbf9ae4b 100644 --- a/ec/acc/testdata/deployment_basic_defaults_3.tf +++ b/ec/acc/testdata/deployment_basic_defaults_3.tf @@ -10,11 +10,13 @@ resource "ec_deployment" "defaults" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } kibana = {} -} \ No newline at end of file +} diff --git 
a/ec/acc/testdata/deployment_basic_defaults_hw_1.tf b/ec/acc/testdata/deployment_basic_defaults_hw_1.tf index 997c21523..650fac4a7 100644 --- a/ec/acc/testdata/deployment_basic_defaults_hw_1.tf +++ b/ec/acc/testdata/deployment_basic_defaults_hw_1.tf @@ -10,9 +10,11 @@ resource "ec_deployment" "defaults" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_defaults_hw_2.tf b/ec/acc/testdata/deployment_basic_defaults_hw_2.tf index cd6aa7475..5bd5d6cad 100644 --- a/ec/acc/testdata/deployment_basic_defaults_hw_2.tf +++ b/ec/acc/testdata/deployment_basic_defaults_hw_2.tf @@ -10,11 +10,13 @@ resource "ec_deployment" "defaults" { deployment_template_id = "%s" elasticsearch = { - hot = { - autoscaling = {} - } - warm = { - autoscaling = {} + topology = { + "hot_content" = { + autoscaling = {} + } + "warm" = { + autoscaling = {} + } } } diff --git a/ec/acc/testdata/deployment_basic_integrations_server_1.tf b/ec/acc/testdata/deployment_basic_integrations_server_1.tf index 8faee116b..f99f85343 100644 --- a/ec/acc/testdata/deployment_basic_integrations_server_1.tf +++ b/ec/acc/testdata/deployment_basic_integrations_server_1.tf @@ -10,8 +10,10 @@ resource "ec_deployment" "basic" { deployment_template_id = "%s" elasticsearch = { - hot = { - autoscaling = {} + topology = { + "hot_content" = { + autoscaling = {} + } } } diff --git a/ec/acc/testdata/deployment_basic_integrations_server_2.tf b/ec/acc/testdata/deployment_basic_integrations_server_2.tf index 936e9d875..e8fff7cb5 100644 --- a/ec/acc/testdata/deployment_basic_integrations_server_2.tf +++ b/ec/acc/testdata/deployment_basic_integrations_server_2.tf @@ -10,8 +10,10 @@ resource "ec_deployment" "basic" { deployment_template_id = "%s" elasticsearch = { - hot = { - autoscaling = {} + topology = { + "hot_content" = { + autoscaling = {} 
+ } } } diff --git a/ec/acc/testdata/deployment_basic_settings_config_1.tf b/ec/acc/testdata/deployment_basic_settings_config_1.tf index daaadb7f3..3db5cdec6 100644 --- a/ec/acc/testdata/deployment_basic_settings_config_1.tf +++ b/ec/acc/testdata/deployment_basic_settings_config_1.tf @@ -10,9 +10,11 @@ resource "ec_deployment" "basic" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } @@ -27,4 +29,4 @@ resource "ec_deployment" "basic" { enterprise_search = { instance_configuration_id = "%s" } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_settings_config_2.tf b/ec/acc/testdata/deployment_basic_settings_config_2.tf index eb3f46205..cdb457544 100644 --- a/ec/acc/testdata/deployment_basic_settings_config_2.tf +++ b/ec/acc/testdata/deployment_basic_settings_config_2.tf @@ -13,9 +13,11 @@ resource "ec_deployment" "basic" { config = { user_settings_yaml = "action.auto_create_index: true" } - hot = { - size = "1g" - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } @@ -43,4 +45,4 @@ resource "ec_deployment" "basic" { instance_configuration_id = "%s" } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_settings_config_import.tf b/ec/acc/testdata/deployment_basic_settings_config_import.tf index 8209f974b..a8ff62dc7 100644 --- a/ec/acc/testdata/deployment_basic_settings_config_import.tf +++ b/ec/acc/testdata/deployment_basic_settings_config_import.tf @@ -10,36 +10,36 @@ resource "ec_deployment" "basic" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } + + "warm" = { + autoscaling = {} + } + + "cold" = { + autoscaling = {} + } + + "frozen" = { + autoscaling = {} + } + + "ml" = { + autoscaling = {} + } + + "master" = { + autoscaling = {} + } + + 
"coordinating" = { + autoscaling = {} + } } - - warm = { - autoscaling = {} - } - - cold = { - autoscaling = {} - } - - frozen = { - autoscaling = {} - } - - ml = { - autoscaling = {} - } - - master = { - autoscaling = {} - } - - coordinating = { - autoscaling = {} - } - - config = {} } kibana = { @@ -53,4 +53,4 @@ resource "ec_deployment" "basic" { enterprise_search = { instance_configuration_id = "%s" } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_tags_1.tf b/ec/acc/testdata/deployment_basic_tags_1.tf index a6cba36f1..c0d4f9392 100644 --- a/ec/acc/testdata/deployment_basic_tags_1.tf +++ b/ec/acc/testdata/deployment_basic_tags_1.tf @@ -10,9 +10,11 @@ resource "ec_deployment" "tags" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "2g" - autoscaling = {} + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } @@ -20,4 +22,4 @@ resource "ec_deployment" "tags" { owner = "elastic" cost-center = "rnd" } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_tags_2.tf b/ec/acc/testdata/deployment_basic_tags_2.tf index 5569f971b..4d2ca4ff7 100644 --- a/ec/acc/testdata/deployment_basic_tags_2.tf +++ b/ec/acc/testdata/deployment_basic_tags_2.tf @@ -10,13 +10,15 @@ resource "ec_deployment" "tags" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "2g" - autoscaling = {} + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } tags = { owner = "elastic" } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_tags_3.tf b/ec/acc/testdata/deployment_basic_tags_3.tf index b8af88b40..e3be70428 100644 --- a/ec/acc/testdata/deployment_basic_tags_3.tf +++ b/ec/acc/testdata/deployment_basic_tags_3.tf @@ -10,9 +10,11 @@ resource "ec_deployment" "tags" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "2g" - autoscaling = {} + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } -} \ No 
newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_tags_4.tf b/ec/acc/testdata/deployment_basic_tags_4.tf index 202dddb0e..f6336ac8f 100644 --- a/ec/acc/testdata/deployment_basic_tags_4.tf +++ b/ec/acc/testdata/deployment_basic_tags_4.tf @@ -10,13 +10,15 @@ resource "ec_deployment" "tags" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "2g" - autoscaling = {} + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } tags = { new = "tag" } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_with_traffic_filter_2.tf b/ec/acc/testdata/deployment_basic_with_traffic_filter_2.tf index 04c4bf903..294f9798f 100644 --- a/ec/acc/testdata/deployment_basic_with_traffic_filter_2.tf +++ b/ec/acc/testdata/deployment_basic_with_traffic_filter_2.tf @@ -10,9 +10,11 @@ resource "ec_deployment" "basic" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } diff --git a/ec/acc/testdata/deployment_basic_with_traffic_filter_3.tf b/ec/acc/testdata/deployment_basic_with_traffic_filter_3.tf index 85026ccd3..dde629a7f 100644 --- a/ec/acc/testdata/deployment_basic_with_traffic_filter_3.tf +++ b/ec/acc/testdata/deployment_basic_with_traffic_filter_3.tf @@ -10,9 +10,11 @@ resource "ec_deployment" "basic" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } diff --git a/ec/acc/testdata/deployment_ccs_1.tf b/ec/acc/testdata/deployment_ccs_1.tf index 9a0166aad..846dc1e62 100644 --- a/ec/acc/testdata/deployment_ccs_1.tf +++ b/ec/acc/testdata/deployment_ccs_1.tf @@ -10,8 +10,10 @@ resource "ec_deployment" "ccs" { deployment_template_id = "%s" elasticsearch = { - hot = { - autoscaling = {} + topology = { + "hot_content" = { + autoscaling = {} + } } "remote_cluster" = [for source_css in 
ec_deployment.source_ccs : @@ -31,10 +33,12 @@ resource "ec_deployment" "source_ccs" { deployment_template_id = "%s" elasticsearch = { - hot = { - zone_count = 1 - size = "1g" - autoscaling = {} + topology = { + "hot_content" = { + zone_count = 1 + size = "1g" + autoscaling = {} + } } } } diff --git a/ec/acc/testdata/deployment_ccs_2.tf b/ec/acc/testdata/deployment_ccs_2.tf index 3a76e7c5a..821097202 100644 --- a/ec/acc/testdata/deployment_ccs_2.tf +++ b/ec/acc/testdata/deployment_ccs_2.tf @@ -10,11 +10,13 @@ resource "ec_deployment" "ccs" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "2g" - autoscaling = {} + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } kibana = {} -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_compute_optimized_1.tf b/ec/acc/testdata/deployment_compute_optimized_1.tf index 6826c5d2a..5e0124696 100644 --- a/ec/acc/testdata/deployment_compute_optimized_1.tf +++ b/ec/acc/testdata/deployment_compute_optimized_1.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "compute_optimized" { deployment_template_id = "%s" elasticsearch = { - hot = { - autoscaling = {} + topology = { + "hot_content" = { + autoscaling = {} + } } } kibana = {} -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_compute_optimized_2.tf b/ec/acc/testdata/deployment_compute_optimized_2.tf index f6f26f1c0..d72704871 100644 --- a/ec/acc/testdata/deployment_compute_optimized_2.tf +++ b/ec/acc/testdata/deployment_compute_optimized_2.tf @@ -10,13 +10,15 @@ resource "ec_deployment" "compute_optimized" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "2g" - autoscaling = {} + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } kibana = {} apm = {} -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_dedicated_coordinating.tf b/ec/acc/testdata/deployment_dedicated_coordinating.tf index 07a500b1f..a9aa4af2e 100644 --- 
a/ec/acc/testdata/deployment_dedicated_coordinating.tf +++ b/ec/acc/testdata/deployment_dedicated_coordinating.tf @@ -10,22 +10,24 @@ resource "ec_deployment" "dedicated_coordinating" { deployment_template_id = "%s" elasticsearch = { - coordinating = { - zone_count = 2 - size = "1g" - autoscaling = {} - } + topology = { + "coordinating" = { + zone_count = 2 + size = "1g" + autoscaling = {} + } - hot = { - zone_count = 1 - size = "1g" - autoscaling = {} - } + "hot_content" = { + zone_count = 1 + size = "1g" + autoscaling = {} + } - warm = { - zone_count = 1 - size = "2g" - autoscaling = {} + "warm" = { + zone_count = 1 + size = "2g" + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_dedicated_master.tf b/ec/acc/testdata/deployment_dedicated_master.tf index d1ed6e9eb..6c29f0452 100644 --- a/ec/acc/testdata/deployment_dedicated_master.tf +++ b/ec/acc/testdata/deployment_dedicated_master.tf @@ -10,28 +10,30 @@ resource "ec_deployment" "dedicated_master" { deployment_template_id = "%s" elasticsearch = { - cold = { - zone_count = 1 - size = "2g" - autoscaling = {} - } + topology = { + "cold" = { + zone_count = 1 + size = "2g" + autoscaling = {} + } - hot = { - zone_count = 3 - size = "1g" - autoscaling = {} - } + "hot_content" = { + zone_count = 3 + size = "1g" + autoscaling = {} + } - master = { - zone_count = 3 - size = "1g" - autoscaling = {} - } + "master" = { + zone_count = 3 + size = "1g" + autoscaling = {} + } - warm = { - zone_count = 2 - size = "2g" - autoscaling = {} + "warm" = { + zone_count = 2 + size = "2g" + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_docker_image_override.tf b/ec/acc/testdata/deployment_docker_image_override.tf index d4712df1e..96f0a76d0 100644 --- a/ec/acc/testdata/deployment_docker_image_override.tf +++ b/ec/acc/testdata/deployment_docker_image_override.tf @@ -20,10 +20,12 @@ resource "ec_deployment" "docker_image" { docker_image = 
"docker.elastic.co/cloud-ci/elasticsearch:7.15.0-SNAPSHOT" } - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_1.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_1.tf index efab4a609..5733d1f3a 100644 --- a/ec/acc/testdata/deployment_elasticsearch_keystore_1.tf +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_1.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "keystore" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_1_migrated.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_1_migrated.tf index efab4a609..5733d1f3a 100644 --- a/ec/acc/testdata/deployment_elasticsearch_keystore_1_migrated.tf +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_1_migrated.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "keystore" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_2.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_2.tf index 1dfd64808..d9a2ac720 100644 --- a/ec/acc/testdata/deployment_elasticsearch_keystore_2.tf +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_2.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "keystore" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_3.tf 
b/ec/acc/testdata/deployment_elasticsearch_keystore_3.tf index a20a03a8f..7cabe7f26 100644 --- a/ec/acc/testdata/deployment_elasticsearch_keystore_3.tf +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_3.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "keystore" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_4.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_4.tf index 09d7564eb..670e41b1c 100644 --- a/ec/acc/testdata/deployment_elasticsearch_keystore_4.tf +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_4.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "keystore" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_creds.json b/ec/acc/testdata/deployment_elasticsearch_keystore_creds.json index 337663782..b8e5d8443 100644 --- a/ec/acc/testdata/deployment_elasticsearch_keystore_creds.json +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_creds.json @@ -9,4 +9,4 @@ "token_uri": "https://accounts.google.com/o/oauth2/token", "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/service-account-email" -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_emptyconfig.tf b/ec/acc/testdata/deployment_emptyconfig.tf index b8e1122b6..ce2647acd 100644 --- a/ec/acc/testdata/deployment_emptyconfig.tf +++ b/ec/acc/testdata/deployment_emptyconfig.tf @@ -13,10 +13,12 @@ resource "ec_deployment" "emptyconfig" { config = { user_settings_yaml = null } - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + 
topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_enterprise_search_1.tf b/ec/acc/testdata/deployment_enterprise_search_1.tf index 91ea5a30d..0797989b9 100644 --- a/ec/acc/testdata/deployment_enterprise_search_1.tf +++ b/ec/acc/testdata/deployment_enterprise_search_1.tf @@ -10,12 +10,14 @@ resource "ec_deployment" "enterprise_search" { deployment_template_id = "%s" elasticsearch = { - hot = { - autoscaling = {} + topology = { + "hot_content" = { + autoscaling = {} + } } } kibana = {} enterprise_search = {} -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_enterprise_search_2.tf b/ec/acc/testdata/deployment_enterprise_search_2.tf index fb610a94e..5afc2f3e8 100644 --- a/ec/acc/testdata/deployment_enterprise_search_2.tf +++ b/ec/acc/testdata/deployment_enterprise_search_2.tf @@ -10,13 +10,15 @@ resource "ec_deployment" "enterprise_search" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "2g" - autoscaling = {} + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } kibana = {} enterprise_search = {} -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_hotwarm_1.tf b/ec/acc/testdata/deployment_hotwarm_1.tf index ac5502b1e..e1f20ca6b 100644 --- a/ec/acc/testdata/deployment_hotwarm_1.tf +++ b/ec/acc/testdata/deployment_hotwarm_1.tf @@ -10,12 +10,14 @@ resource "ec_deployment" "hotwarm" { deployment_template_id = "%s" elasticsearch = { - hot = { - autoscaling = {} - } + topology = { + "hot_content" = { + autoscaling = {} + } - warm = { - autoscaling = {} + "warm" = { + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_hotwarm_2.tf b/ec/acc/testdata/deployment_hotwarm_2.tf index 489a5d10a..d53fdda74 100644 --- a/ec/acc/testdata/deployment_hotwarm_2.tf +++ b/ec/acc/testdata/deployment_hotwarm_2.tf @@ -10,17 +10,18 @@ resource 
"ec_deployment" "hotwarm" { deployment_template_id = "%s" elasticsearch = { - hot = { - zone_count = 1 - size = "1g" - autoscaling = {} - } + topology = { + "hot_content" = { + zone_count = 1 + size = "1g" + autoscaling = {} + } - warm = { - zone_count = 1 - size = "2g" - autoscaling = {} + "warm" = { + zone_count = 1 + size = "2g" + autoscaling = {} + } } - } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_memory_optimized_1.tf b/ec/acc/testdata/deployment_memory_optimized_1.tf index 57848c402..05445f84e 100644 --- a/ec/acc/testdata/deployment_memory_optimized_1.tf +++ b/ec/acc/testdata/deployment_memory_optimized_1.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "memory_optimized" { deployment_template_id = "%s" elasticsearch = { - hot = { - autoscaling = {} + topology = { + "hot_content" = { + autoscaling = {} + } } } kibana = {} -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_memory_optimized_2.tf b/ec/acc/testdata/deployment_memory_optimized_2.tf index a0a5f5e4e..d547b04a1 100644 --- a/ec/acc/testdata/deployment_memory_optimized_2.tf +++ b/ec/acc/testdata/deployment_memory_optimized_2.tf @@ -10,13 +10,15 @@ resource "ec_deployment" "memory_optimized" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "2g" - autoscaling = {} + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } kibana = {} apm = {} -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_observability_1.tf b/ec/acc/testdata/deployment_observability_1.tf index 6ff24f65b..48ccb6663 100644 --- a/ec/acc/testdata/deployment_observability_1.tf +++ b/ec/acc/testdata/deployment_observability_1.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "basic" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } @@ -25,14 +27,16 @@ resource "ec_deployment" 
"observability" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } observability = { deployment_id = ec_deployment.basic.id } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_observability_2.tf b/ec/acc/testdata/deployment_observability_2.tf index dd626cc62..79e2dffa6 100644 --- a/ec/acc/testdata/deployment_observability_2.tf +++ b/ec/acc/testdata/deployment_observability_2.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "basic" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } @@ -25,10 +27,12 @@ resource "ec_deployment" "observability" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } @@ -36,4 +40,4 @@ resource "ec_deployment" "observability" { deployment_id = ec_deployment.basic.id metrics = false } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_observability_3.tf b/ec/acc/testdata/deployment_observability_3.tf index 0587d25ca..0bf364b50 100644 --- a/ec/acc/testdata/deployment_observability_3.tf +++ b/ec/acc/testdata/deployment_observability_3.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "basic" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } @@ -25,10 +27,12 @@ resource "ec_deployment" "observability" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } 
@@ -36,4 +40,4 @@ resource "ec_deployment" "observability" { deployment_id = ec_deployment.basic.id logs = false } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_observability_4.tf b/ec/acc/testdata/deployment_observability_4.tf index 411be808e..244dca5f6 100644 --- a/ec/acc/testdata/deployment_observability_4.tf +++ b/ec/acc/testdata/deployment_observability_4.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "basic" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } @@ -25,10 +27,12 @@ resource "ec_deployment" "observability" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_observability_self.tf b/ec/acc/testdata/deployment_observability_self.tf index 1c8523027..de9152b04 100644 --- a/ec/acc/testdata/deployment_observability_self.tf +++ b/ec/acc/testdata/deployment_observability_self.tf @@ -16,10 +16,12 @@ resource "ec_deployment" "observability" { elasticsearch = { autoscale = "false" - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } @@ -27,4 +29,4 @@ resource "ec_deployment" "observability" { size = "1g" zone_count = 1 } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_observability_tpl_1.tf b/ec/acc/testdata/deployment_observability_tpl_1.tf index b58f1ecd9..c770687a3 100644 --- a/ec/acc/testdata/deployment_observability_tpl_1.tf +++ b/ec/acc/testdata/deployment_observability_tpl_1.tf @@ -10,12 +10,14 @@ resource "ec_deployment" "observability_tpl" { deployment_template_id = "%s" elasticsearch = { - hot = { - autoscaling = {} + 
topology = { + "hot_content" = { + autoscaling = {} + } } } kibana = {} apm = {} -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_observability_tpl_2.tf b/ec/acc/testdata/deployment_observability_tpl_2.tf index 896393889..1c7b46e81 100644 --- a/ec/acc/testdata/deployment_observability_tpl_2.tf +++ b/ec/acc/testdata/deployment_observability_tpl_2.tf @@ -10,13 +10,15 @@ resource "ec_deployment" "observability_tpl" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "2g" - autoscaling = {} + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } kibana = {} apm = {} -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_post_node_roles_upgrade_1.tf b/ec/acc/testdata/deployment_post_node_roles_upgrade_1.tf index 68fe0810d..b1b1d1573 100644 --- a/ec/acc/testdata/deployment_post_node_roles_upgrade_1.tf +++ b/ec/acc/testdata/deployment_post_node_roles_upgrade_1.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "post_nr_upgrade" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_post_node_roles_upgrade_2.tf b/ec/acc/testdata/deployment_post_node_roles_upgrade_2.tf index a28f001f2..67685532a 100644 --- a/ec/acc/testdata/deployment_post_node_roles_upgrade_2.tf +++ b/ec/acc/testdata/deployment_post_node_roles_upgrade_2.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "post_nr_upgrade" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf b/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf index 46f7c995f..28c45b6a5 100644 
--- a/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf +++ b/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "pre_nr" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf b/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf index ede18f462..c1934b5ac 100644 --- a/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf +++ b/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "pre_nr" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf b/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf index 4cbdf674d..de80cdb00 100644 --- a/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf +++ b/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf @@ -10,17 +10,18 @@ resource "ec_deployment" "pre_nr" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} - } + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } - warm = { - size = "2g" - zone_count = 1 - autoscaling = {} + "warm" = { + size = "2g" + zone_count = 1 + autoscaling = {} + } } - } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_security_1.tf b/ec/acc/testdata/deployment_security_1.tf index 235966c38..fc9c94ca5 100644 --- a/ec/acc/testdata/deployment_security_1.tf +++ b/ec/acc/testdata/deployment_security_1.tf @@ -10,10 +10,12 @@ resource "ec_deployment" "security" { 
deployment_template_id = "%s" elasticsearch = { - hot = { - autoscaling = {} + topology = { + "hot_content" = { + autoscaling = {} + } } } kibana = {} -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_security_2.tf b/ec/acc/testdata/deployment_security_2.tf index 25c4f4e86..3a71e020b 100644 --- a/ec/acc/testdata/deployment_security_2.tf +++ b/ec/acc/testdata/deployment_security_2.tf @@ -10,13 +10,15 @@ resource "ec_deployment" "security" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "2g" - autoscaling = {} + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } kibana = {} apm = {} -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_snapshot_1.tf b/ec/acc/testdata/deployment_snapshot_1.tf index 630dc9ea7..07e035a2f 100644 --- a/ec/acc/testdata/deployment_snapshot_1.tf +++ b/ec/acc/testdata/deployment_snapshot_1.tf @@ -15,9 +15,11 @@ resource "ec_deployment" "snapshot_source" { deployment_template_id = local.deployment_template elasticsearch = { - hot = { - size = "1g" - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } } diff --git a/ec/acc/testdata/deployment_snapshot_2.tf b/ec/acc/testdata/deployment_snapshot_2.tf index 712cd3716..7f41cd18b 100644 --- a/ec/acc/testdata/deployment_snapshot_2.tf +++ b/ec/acc/testdata/deployment_snapshot_2.tf @@ -15,9 +15,11 @@ resource "ec_deployment" "snapshot_source" { deployment_template_id = local.deployment_template elasticsearch = { - hot = { - size = "1g" - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } } @@ -34,9 +36,11 @@ resource "ec_deployment" "snapshot_target" { source_elasticsearch_cluster_id = ec_deployment.snapshot_source.elasticsearch.0.resource_id }] - hot = { - size = "1g" - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } } diff --git 
a/ec/acc/testdata/deployment_traffic_filter_association_basic.tf b/ec/acc/testdata/deployment_traffic_filter_association_basic.tf index c41cb7f6a..80ffb1c60 100644 --- a/ec/acc/testdata/deployment_traffic_filter_association_basic.tf +++ b/ec/acc/testdata/deployment_traffic_filter_association_basic.tf @@ -10,9 +10,11 @@ resource "ec_deployment" "tf_assoc" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } diff --git a/ec/acc/testdata/deployment_traffic_filter_association_basic_ignore_changes.tf b/ec/acc/testdata/deployment_traffic_filter_association_basic_ignore_changes.tf index 6aeea7873..0e4cbc876 100644 --- a/ec/acc/testdata/deployment_traffic_filter_association_basic_ignore_changes.tf +++ b/ec/acc/testdata/deployment_traffic_filter_association_basic_ignore_changes.tf @@ -10,9 +10,11 @@ resource "ec_deployment" "tf_assoc" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } diff --git a/ec/acc/testdata/deployment_traffic_filter_association_basic_update.tf b/ec/acc/testdata/deployment_traffic_filter_association_basic_update.tf index a756bafe6..3ffd74f67 100644 --- a/ec/acc/testdata/deployment_traffic_filter_association_basic_update.tf +++ b/ec/acc/testdata/deployment_traffic_filter_association_basic_update.tf @@ -10,9 +10,11 @@ resource "ec_deployment" "tf_assoc" { deployment_template_id = "%s" elasticsearch = { - hot = { - size = "1g" - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } diff --git a/ec/acc/testdata/deployment_upgrade_retry_1.tf b/ec/acc/testdata/deployment_upgrade_retry_1.tf index 58a9e01ad..6ab4ec207 100644 --- a/ec/acc/testdata/deployment_upgrade_retry_1.tf +++ b/ec/acc/testdata/deployment_upgrade_retry_1.tf @@ -15,10 +15,12 @@ resource "ec_deployment" "upgrade_retry" { 
deployment_template_id = local.deployment_template elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } diff --git a/ec/acc/testdata/deployment_upgrade_retry_2.tf b/ec/acc/testdata/deployment_upgrade_retry_2.tf index ae13523e3..a9913008c 100644 --- a/ec/acc/testdata/deployment_upgrade_retry_2.tf +++ b/ec/acc/testdata/deployment_upgrade_retry_2.tf @@ -15,10 +15,12 @@ resource "ec_deployment" "upgrade_retry" { deployment_template_id = local.deployment_template elasticsearch = { - hot = { - size = "1g" - zone_count = 1 - autoscaling = {} + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } diff --git a/ec/acc/testdata/deployment_with_extension_bundle_file.tf b/ec/acc/testdata/deployment_with_extension_bundle_file.tf index 7cb1fef0b..6e83c3b77 100644 --- a/ec/acc/testdata/deployment_with_extension_bundle_file.tf +++ b/ec/acc/testdata/deployment_with_extension_bundle_file.tf @@ -18,8 +18,10 @@ resource "ec_deployment" "with_extension" { deployment_template_id = local.deployment_template elasticsearch = { - hot = { - autoscaling = {} + topology = { + "hot_content" = { + autoscaling = {} + } } extension = [{ type = "bundle" diff --git a/examples/deployment/deployment.tf b/examples/deployment/deployment.tf index af7ef098d..de0c178a7 100644 --- a/examples/deployment/deployment.tf +++ b/examples/deployment/deployment.tf @@ -30,8 +30,10 @@ resource "ec_deployment" "example_minimal" { elasticsearch = { - hot = { - autoscaling = {} + topology = { + "hot_content" = { + autoscaling = {} + } } config = { user_settings_yaml = file("./es_settings.yaml") diff --git a/examples/deployment_ccs/deployment.tf b/examples/deployment_ccs/deployment.tf index 26e9b7abb..e71fe4023 100644 --- a/examples/deployment_ccs/deployment.tf +++ b/examples/deployment_ccs/deployment.tf @@ -25,11 +25,12 @@ resource "ec_deployment" "source_deployment" { 
deployment_template_id = "aws-io-optimized-v2" elasticsearch = { - config = {} - hot = { - zone_count = 1 - size = "2g" - autoscaling = {} + topology = { + "hot_content" = { + zone_count = 1 + size = "2g" + autoscaling = {} + } } } } @@ -42,11 +43,12 @@ resource "ec_deployment" "second_source" { deployment_template_id = "aws-io-optimized-v2" elasticsearch = { - config = {} - hot = { - zone_count = 1 - size = "2g" - autoscaling = {} + topology = { + "hot_content" = { + zone_count = 1 + size = "2g" + autoscaling = {} + } } } } @@ -59,9 +61,10 @@ resource "ec_deployment" "ccs" { deployment_template_id = "aws-cross-cluster-search-v2" elasticsearch = { - config = {} - hot = { - autoscaling = {} + topology = { + "hot_content" = { + autoscaling = {} + } } remote_cluster = [ diff --git a/examples/deployment_ec2_instance/elastic_deployment.tf b/examples/deployment_ec2_instance/elastic_deployment.tf index 8ac2a3a04..d1d0221f0 100644 --- a/examples/deployment_ec2_instance/elastic_deployment.tf +++ b/examples/deployment_ec2_instance/elastic_deployment.tf @@ -17,9 +17,10 @@ resource "ec_deployment" "deployment" { # Note the deployment will contain Elasticsearch and Kibana resources with default configurations. 
elasticsearch = { - config = {} - hot = { - autoscaling = {} + topology = { + "hot_content" = { + autoscaling = {} + } } } diff --git a/examples/deployment_with_init/deployment.tf b/examples/deployment_with_init/deployment.tf index 0a7576fa1..7300525c5 100644 --- a/examples/deployment_with_init/deployment.tf +++ b/examples/deployment_with_init/deployment.tf @@ -15,10 +15,11 @@ resource "ec_deployment" "example_minimal" { deployment_template_id = "aws-io-optimized-v2" traffic_filter = [ec_deployment_traffic_filter.allow_all.id] elasticsearch = { - config = {} - hot = { - size = "8g" - autoscaling = {} + topology = { + "hot_content" = { + size = "8g" + autoscaling = {} + } } } From f9747201f3f61716a5f8daac369eebaa18667099 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Wed, 25 Jan 2023 15:52:42 +0100 Subject: [PATCH 102/104] remove obsolete code --- .../deploymentresource/elasticsearch/v1/schema.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go index 4aac61fe8..c2abf03ae 100644 --- a/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go @@ -281,16 +281,12 @@ func ElasticsearchRemoteClusterSchema() tfsdk.Attribute { "deployment_id": { Description: "Remote deployment ID", Type: types.StringType, - // TODO fix examples/deployment_css/deployment.tf#61 - // Validators: []tfsdk.AttributeValidator{validators.Length(32, 32)}, - Required: true, + Required: true, }, "alias": { Description: "Alias for this Cross Cluster Search binding", Type: types.StringType, - // TODO fix examples/deployment_css/deployment.tf#62 - // Validators: []tfsdk.AttributeValidator{validators.NotEmpty()}, - Required: true, + Required: true, }, "ref_id": { Description: `Remote elasticsearch "ref_id", it is best left to the default value`, From 
2a17c5b85c8b2e4a690686931f4ca26358bae598 Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Thu, 26 Jan 2023 13:34:13 +0100 Subject: [PATCH 103/104] fix errors in acceptance tests --- ec/acc/testdata/deployment_basic_settings_config_import.tf | 2 ++ ec/acc/testdata/deployment_elasticsearch_keystore_creds.json | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ec/acc/testdata/deployment_basic_settings_config_import.tf b/ec/acc/testdata/deployment_basic_settings_config_import.tf index a8ff62dc7..ef77aaeae 100644 --- a/ec/acc/testdata/deployment_basic_settings_config_import.tf +++ b/ec/acc/testdata/deployment_basic_settings_config_import.tf @@ -40,6 +40,8 @@ resource "ec_deployment" "basic" { autoscaling = {} } } + + config = {} } kibana = { diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_creds.json b/ec/acc/testdata/deployment_elasticsearch_keystore_creds.json index b8e5d8443..337663782 100644 --- a/ec/acc/testdata/deployment_elasticsearch_keystore_creds.json +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_creds.json @@ -9,4 +9,4 @@ "token_uri": "https://accounts.google.com/o/oauth2/token", "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/service-account-email" -} +} \ No newline at end of file From b81d334729b08c8837b10381d6f2617cb8e8e39a Mon Sep 17 00:00:00 2001 From: Dmytro Onishchenko Date: Mon, 6 Feb 2023 12:05:47 +0100 Subject: [PATCH 104/104] fix regex expressions in acc tests for pre node_roles --- ec/acc/testdata/deployment_pre_node_roles_migration_1.tf | 2 +- ec/acc/testdata/deployment_pre_node_roles_migration_2.tf | 2 +- ec/acc/testdata/deployment_pre_node_roles_migration_3.tf | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf b/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf index 28c45b6a5..797c560b4 100644 --- 
a/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf +++ b/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf @@ -1,5 +1,5 @@ data "ec_stack" "pre_node_roles" { - version_regex = "7.9.?" + version_regex = "^7\\.9\\.\\d{1,2}$" region = "%s" } diff --git a/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf b/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf index c1934b5ac..f958fc44f 100644 --- a/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf +++ b/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf @@ -1,5 +1,5 @@ data "ec_stack" "pre_node_roles" { - version_regex = "7.??.?" + version_regex = "^7\\.\\d{1,2}\\.\\d{1,2}$" region = "%s" } diff --git a/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf b/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf index de80cdb00..c46eb113b 100644 --- a/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf +++ b/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf @@ -1,5 +1,5 @@ data "ec_stack" "pre_node_roles" { - version_regex = "7.??.?" + version_regex = "^7\\.\\d{1,2}\\.\\d{1,2}$" region = "%s" }