diff --git a/docs/resources/cluster_2.md b/docs/resources/cluster_2.md new file mode 100644 index 00000000..f935c772 --- /dev/null +++ b/docs/resources/cluster_2.md @@ -0,0 +1,37 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "materialize_cluster_2 Resource - terraform-provider-materialize" +subcategory: "" +description: |- + +--- + +# materialize_cluster_2 (Resource) + + + + + + +## Schema + +### Required + +- `name` (String) The identifier for the cluster. + +### Optional + +- `availability_zones` (List of String) The specific availability zones of the cluster. +- `comment` (String) **Public Preview** Comment on an object in the database. +- `disk` (Boolean) **Deprecated**. This attribute is maintained for backward compatibility with existing configurations. New users should use 'cc' sizes for disk access. Disk replicas are deprecated and will be removed in a future release. The `disk` attribute will be enabled by default for 'cc' clusters +- `idle_arrangement_merge_effort` (Number) The amount of effort to exert compacting arrangements during idle periods. This is an unstable option! It may be changed or removed at any time. +- `introspection_debugging` (Boolean) Whether to introspect the gathering of the introspection data. +- `introspection_interval` (String) The interval at which to collect introspection data. +- `ownership_role` (String) The ownership role of the object. +- `region` (String) The region to use for the resource connection. If not set, the default region is used. +- `replication_factor` (Number) The number of replicas of each dataflow-powered object to maintain. +- `size` (String) The size of the managed cluster. + +### Read-Only + +- `id` (String) The Cluster ID diff --git a/go.mod b/go.mod index 06e784f1..13535c9f 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.6.0 // indirect - github.com/google/uuid v1.4.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/cli v1.1.6 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect @@ -48,8 +48,11 @@ require ( github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.20.0 // indirect github.com/hashicorp/terraform-json v0.21.0 // indirect + github.com/hashicorp/terraform-plugin-framework v1.6.1 // indirect + github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 // indirect github.com/hashicorp/terraform-plugin-go v0.22.0 // indirect github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect + github.com/hashicorp/terraform-plugin-mux v0.15.0 // indirect github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect @@ -84,8 +87,8 @@ require ( golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.13.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/grpc v1.61.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/grpc v1.62.0 // indirect google.golang.org/protobuf v1.32.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v2 v2.3.0 // indirect diff --git a/go.sum b/go.sum index 
9f998ccc..9db7f78e 100644 --- a/go.sum +++ b/go.sum @@ -22,6 +22,7 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= @@ -58,6 +59,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/cli v1.1.6 h1:CMOV+/LJfL1tXCOKrgAX0uRKnzjj/mpmqNXloRSy2K8= github.com/hashicorp/cli v1.1.6/go.mod h1:MPon5QYlgjjo0BSoAiN0ESeT5fRzDjVRp+uioJ0piz4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -94,10 +97,16 @@ github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRy github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= github.com/hashicorp/terraform-plugin-docs v0.18.0 h1:2bINhzXc+yDeAcafurshCrIjtdu1XHn9zZ3ISuEhgpk= github.com/hashicorp/terraform-plugin-docs v0.18.0/go.mod h1:iIUfaJpdUmpi+rI42Kgq+63jAjI8aZVTyxp3Bvk9Hg8= +github.com/hashicorp/terraform-plugin-framework v1.6.1 h1:hw2XrmUu8d8jVL52ekxim2IqDc+2Kpekn21xZANARLU= +github.com/hashicorp/terraform-plugin-framework v1.6.1/go.mod h1:aJI+n/hBPhz1J+77GdgNfk5svW12y7fmtxe/5L5IuwI= +github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc= +github.com/hashicorp/terraform-plugin-framework-validators v0.12.0/go.mod h1:jfHGE/gzjxYz6XoUwi/aYiiKrJDeutQNUtGQXkaHklg= github.com/hashicorp/terraform-plugin-go v0.22.0 h1:1OS1Jk5mO0f5hrziWJGXXIxBrMe2j/B8E+DVGw43Xmc= github.com/hashicorp/terraform-plugin-go v0.22.0/go.mod h1:mPULV91VKss7sik6KFEcEu7HuTogMLLO/EvWCuFkRVE= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= +github.com/hashicorp/terraform-plugin-mux v0.15.0 h1:+/+lDx0WUsIOpkAmdwBIoFU8UP9o2eZASoOnLsWbKME= +github.com/hashicorp/terraform-plugin-mux v0.15.0/go.mod h1:9ezplb1Dyq394zQ+ldB0nvy/qbNAz3mMoHHseMTMaKo= github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 h1:qHprzXy/As0rxedphECBEQAh3R4yp6pKksKHcqZx5G8= github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0/go.mod h1:H+8tjs9TjV2w57QFVSMBQacf8k/E1XwLXGCARgViC6A= github.com/hashicorp/terraform-plugin-testing v1.7.0 h1:I6aeCyZ30z4NiI3tzyDoO6fS7YxP5xSL1ceOon3gTe8= @@ -221,6 +230,7 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -260,8 +270,12 @@ google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAs google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= +google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= diff --git a/main.go b/main.go index 82e1f951..5eb62ba4 100644 --- a/main.go +++ b/main.go @@ -1,8 +1,15 @@ package main import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" + "context" + "flag" + "log" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server" + "github.com/hashicorp/terraform-plugin-mux/tf5to6server" + "github.com/hashicorp/terraform-plugin-mux/tf6muxserver" "github.com/MaterializeInc/terraform-provider-materialize/pkg/provider" ) @@ -16,9 +23,50 @@ var ( ) func main() { - plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: func() *schema.Provider { - return provider.Provider(version) + ctx := context.Background() + + var debug bool + + flag.BoolVar(&debug, "debug", false, "set to true to run the provider with support for debuggers like delve") + flag.Parse() + + upgradedSdkServer, err := tf5to6server.UpgradeServer( + ctx, + provider.Provider(version).GRPCProvider, + ) + + if err != nil { + log.Fatal(err) + } + + providers := []func() tfprotov6.ProviderServer{ + // disabled until ready to start using + // providerserver.NewProtocol6(provider.New(version)()), + providerserver.NewProtocol6(provider.New(version)), + func() tfprotov6.ProviderServer { + return upgradedSdkServer }, - }) + } + + muxServer, err := tf6muxserver.NewMuxServer(ctx, providers...) 
+ + if err != nil { + log.Fatal(err) + } + + var serveOpts []tf6server.ServeOpt + + if debug { + serveOpts = append(serveOpts, tf6server.WithManagedDebug()) + } + + err = tf6server.Serve( + "registry.terraform.io/materializeinc/terraform-provider-materialize", + muxServer.ProviderServer, + serveOpts..., + ) + + if err != nil { + log.Fatal(err) + } } diff --git a/pkg/provider/framework_provider.go b/pkg/provider/framework_provider.go new file mode 100644 index 00000000..3ae230f5 --- /dev/null +++ b/pkg/provider/framework_provider.go @@ -0,0 +1,249 @@ +package provider + +import ( + "context" + "fmt" + "log" + "os" + + "github.com/MaterializeInc/terraform-provider-materialize/pkg/clients" + "github.com/MaterializeInc/terraform-provider-materialize/pkg/resources" + "github.com/MaterializeInc/terraform-provider-materialize/pkg/utils" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/provider/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type MaterializeProvider struct { + // Define provider configuration and internal client here + version string + client *utils.ProviderMeta +} + +type providerModelV0 struct { + Endpoint types.String `tfsdk:"endpoint"` + CloudEndpoint types.String `tfsdk:"cloud_endpoint"` + BaseEndpoint types.String `tfsdk:"base_endpoint"` + DefaultRegion types.String `tfsdk:"default_region"` + Password types.String `tfsdk:"password"` + Database types.String `tfsdk:"database"` + SslMode types.String `tfsdk:"sslmode"` +} + +// Ensure MaterializeProvider satisfies various provider interfaces. +var _ provider.Provider = new(MaterializeProvider) + +func New(version string) provider.Provider { + return &MaterializeProvider{ + version: version, + } +} + +func (p *MaterializeProvider) Metadata(ctx context.Context, req provider.MetadataRequest, resp *provider.MetadataResponse) { + resp.TypeName = "materialize" + resp.Version = p.version +} + +func (p *MaterializeProvider) Schema(ctx context.Context, req provider.SchemaRequest, resp *provider.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "password": schema.StringAttribute{ + Description: "The Materialize password. Can also come from the `MZ_PASSWORD` environment variable.", + Optional: true, + Sensitive: true, + }, + "database": schema.StringAttribute{ + Description: "The Materialize database. Can also come from the `MZ_DATABASE` environment variable.
Defaults to `materialize`.", + Optional: true, + }, + "sslmode": schema.StringAttribute{ + Description: "For testing purposes, the SSL mode to use.", + Optional: true, + }, + "endpoint": schema.StringAttribute{ + Description: "The endpoint for the Materialize API.", + Optional: true, + }, + "cloud_endpoint": schema.StringAttribute{ + Description: "The endpoint for the Materialize Cloud API.", + Optional: true, + }, + "base_endpoint": schema.StringAttribute{ + Description: "The base endpoint for Materialize.", + Optional: true, + }, + "default_region": schema.StringAttribute{ + Description: "The default region if not specified in the resource", + Optional: true, + }, + }, + } +} + +func (p *MaterializeProvider) Resources(ctx context.Context) []func() resource.Resource { + return []func() resource.Resource{ + resources.NewClusterResource, + } +} + +func (p *MaterializeProvider) DataSources(ctx context.Context) []func() datasource.DataSource { + return []func() datasource.DataSource{} +} + +// Configure implements the logic from your providerConfigure function adapted for the Plugin Framework +func (p *MaterializeProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { + var config providerModelV0 + + diags := req.Config.Get(ctx, &config) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + log.Printf("[DEBUG] Provider configuration: %+v\n", config) + + // Extracting values from providerModelV0 or falling back to environment variables + password := config.Password.ValueString() + if password == "" { + password = os.Getenv("MZ_PASSWORD") + } + + database := config.Database.ValueString() + if database == "" { + database = os.Getenv("MZ_DATABASE") + if database == "" { + database = "materialize" + } + } + + sslMode := config.SslMode.ValueString() + if sslMode == "" { + sslMode = os.Getenv("MZ_SSLMODE") + if sslMode == "" { + sslMode = "require" + } + } + + endpoint := config.Endpoint.ValueString() + if endpoint == "" { + endpoint = os.Getenv("MZ_ENDPOINT") + if endpoint == "" { + endpoint = "https://admin.cloud.materialize.com" + } + } + + cloudEndpoint := config.CloudEndpoint.ValueString() + if cloudEndpoint == "" { + cloudEndpoint = os.Getenv("MZ_CLOUD_ENDPOINT") + if cloudEndpoint == "" { + cloudEndpoint = "https://api.cloud.materialize.com" + } + } + + baseEndpoint := config.BaseEndpoint.ValueString() + if baseEndpoint == "" { + baseEndpoint = os.Getenv("MZ_BASE_ENDPOINT") + if baseEndpoint == "" { + baseEndpoint = "https://cloud.materialize.com" + } + } + + defaultRegion := config.DefaultRegion.ValueString() + if defaultRegion == "" { + defaultRegion = os.Getenv("MZ_DEFAULT_REGION") + if defaultRegion == "" { + defaultRegion = "aws/us-east-1" + } + } + + applicationName := fmt.Sprintf("terraform-provider-materialize v%s", p.version) + + err := utils.SetDefaultRegion(defaultRegion) + if err != nil { + resp.Diagnostics.AddError("Failed to set default region", err.Error()) + return + } + + // Initialize the Frontegg client + fronteggClient, err := clients.NewFronteggClient(ctx, password, endpoint) + if err != nil { + resp.Diagnostics.AddError("Unable to create Frontegg client", err.Error()) + return + } + + // Initialize the Cloud API client using the Frontegg client and endpoint + cloudAPIClient := clients.NewCloudAPIClient(fronteggClient, cloudEndpoint, baseEndpoint) + regionsEnabled := make(map[clients.Region]bool) + + // Get the list of cloud providers + providers, err := 
cloudAPIClient.ListCloudProviders(ctx) + if err != nil { + resp.Diagnostics.AddError("Unable to list cloud providers", err.Error()) + return + } + + // Store the DB clients for all regions + dbClients := make(map[clients.Region]*clients.DBClient) + for _, provider := range providers { + regionDetails, err := cloudAPIClient.GetRegionDetails(ctx, provider) + log.Printf("[DEBUG] Region details for provider %s: %v\n", provider.ID, regionDetails) + + if err != nil { + log.Printf("[ERROR] Error getting region details for provider %s: %v\n", provider.ID, err) + continue + } + + if regionDetails == nil || regionDetails.RegionInfo == nil { + continue + } + + regionsEnabled[clients.Region(provider.ID)] = regionDetails.RegionInfo != nil && regionDetails.RegionInfo.Resolvable + + // Get the database connection details for the region + host, port, err := clients.SplitHostPort(regionDetails.RegionInfo.SqlAddress) + if err != nil { + log.Printf("[ERROR] Error splitting host and port for region %s: %v\n", provider.ID, err) + continue + } + + user := fronteggClient.Email + + // Instantiate a new DB client for the region + dbClient, diags := clients.NewDBClient(host, user, password, port, database, applicationName, p.version, sslMode) + if diags.HasError() { + log.Printf("[ERROR] Error initializing DB client for region %s: %v\n", provider.ID, diags) + continue + } + + dbClients[clients.Region(provider.ID)] = dbClient + } + + // Check if at least one region has been initialized successfully + if len(dbClients) == 0 { + resp.Diagnostics.AddError("Initialization Error", "No database regions were initialized. Please check your configuration.") + return + } + + log.Printf("[DEBUG] Initialized DB clients for regions: %v\n", dbClients) + + if resp.Diagnostics.HasError() { + return + } + + // Store the configured values in the provider instance for later use + p.client = &utils.ProviderMeta{ + DB: dbClients, + Frontegg: fronteggClient, + CloudAPI: cloudAPIClient, + DefaultRegion: clients.Region(defaultRegion), + RegionsEnabled: regionsEnabled, + } + providerData := &utils.ProviderData{ + Client: p.client, + } + resp.DataSourceData = providerData + resp.ResourceData = providerData +} diff --git a/pkg/provider/provider_test.go b/pkg/provider/provider_test.go index f02eb59b..40cae61b 100644 --- a/pkg/provider/provider_test.go +++ b/pkg/provider/provider_test.go @@ -165,3 +165,50 @@ func testAccCheckGrantDefaultPrivilegeExists(objectType, grantName, granteeName, return nil } } + +// func TestMuxServer(t *testing.T) { +// resource.Test(t, resource.TestCase{ +// ProtoV6ProviderFactories: map[string]func() (tfprotov6.ProviderServer, error){ +// "examplecloud": func() (tfprotov6.ProviderServer, error) { +// ctx := context.Background() + +// upgradedSdkServer, err := tf5to6server.UpgradeServer( +// ctx, +// Provider("dev").GRPCProvider, +// ) + +// if err != nil { +// return nil, err +// } + +// providers := []func() tfprotov6.ProviderServer{ +// // providerserver.NewProtocol6(New()), +// func() tfprotov6.ProviderServer { +// return upgradedSdkServer +// }, +// } + +// muxServer, err := tf6muxserver.NewMuxServer(ctx, providers...) 
+ +// if err != nil { +// return nil, err +// } + +// return muxServer.ProviderServer(), nil +// }, +// }, +// Steps: []resource.TestStep{ +// { +// Config: testAccMuxServerConfig(), +// }, +// }, +// }) +// } + +// func testAccMuxServerConfig() string { +// return ` +// provider "materialize" { +// password = "mzp_1b2a3c4d5e6f7a8b9c0d1e2f3a4b5c6d7e8f9a0b1c2d3e4f5a6b7c8d9e0f1a2b" +// } +// ` +// } diff --git a/pkg/resources/resource_cluster_new.go b/pkg/resources/resource_cluster_new.go new file mode 100644 index 00000000..121e140c --- /dev/null +++ b/pkg/resources/resource_cluster_new.go @@ -0,0 +1,474 @@ +package resources + +import ( + "context" + "database/sql" + "fmt" + "log" + "strings" + + "github.com/MaterializeInc/terraform-provider-materialize/pkg/materialize" + "github.com/MaterializeInc/terraform-provider-materialize/pkg/utils" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" +) + +// Define the resource schema and methods. +type clusterResource struct { + client *utils.ProviderData +} + +func NewClusterResource() resource.Resource { + return &clusterResource{} +} + +func (r *clusterResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cluster_2" +} + +type ClusterStateModelV0 struct { + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Size types.String `tfsdk:"size"` + ReplicationFactor types.Int64 `tfsdk:"replication_factor"` + Disk types.Bool `tfsdk:"disk"` + AvailabilityZones types.List `tfsdk:"availability_zones"` + IntrospectionInterval types.String `tfsdk:"introspection_interval"` + IntrospectionDebugging types.Bool `tfsdk:"introspection_debugging"` + IdleArrangementMergeEffort types.Int64 `tfsdk:"idle_arrangement_merge_effort"` + OwnershipRole types.String `tfsdk:"ownership_role"` + Comment types.String `tfsdk:"comment"` + Region types.String `tfsdk:"region"` +} + +func ClusterSchema() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + MarkdownDescription: "The Cluster ID", + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "name": NewObjectNameSchema("cluster", true, true), + "comment": NewCommentSchema(false), + "ownership_role": NewOwnershipRoleSchema(), + "size": NewSizeSchema("managed cluster", false, false, []string{"replication_factor"}), + "replication_factor": NewReplicationFactorSchema(), + "disk": NewDiskSchema(false), + "availability_zones": NewAvailabilityZonesSchema(), + "introspection_interval": NewIntrospectionIntervalSchema(false, []string{"size"}), + "introspection_debugging": NewIntrospectionDebuggingSchema(false, []string{"size"}), + "idle_arrangement_merge_effort": NewIdleArrangementMergeEffortSchema(false, []string{"size"}), + "region": NewRegionSchema(), + } +} + +func (r *clusterResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: ClusterSchema(), + } +} + +func (r *clusterResource) Configure(ctx context.Context, req 
resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(*utils.ProviderData) + + // Verbosely log the req.ProviderData for debugging purposes. + log.Printf("[DEBUG] ProviderData contents: %+v\n", fmt.Sprintf("%+v", req.ProviderData)) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected *utils.ProviderData, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + r.client = client +} + +// Implement Create method to store the cluster name in the state. +func (r *clusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Initialize and retrieve values from the request's plan. + var state ClusterStateModelV0 + diags := req.Plan.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + metaDb, region, err := utils.NewGetDBClientFromMeta(r.client, state.Region.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Failed to get DB client", err.Error()) + return + } + state.Region = types.StringValue(string(region)) + + o := materialize.MaterializeObject{ObjectType: "CLUSTER", Name: state.Name.ValueString()} + b := materialize.NewClusterBuilder(metaDb, o) + + // Managed cluster options. + if !state.Size.IsNull() { + size := state.Size.ValueString() + + b.Size(size) + + if !state.ReplicationFactor.IsNull() { + r := int(state.ReplicationFactor.ValueInt64()) + b.ReplicationFactor(&r) + } + + if strings.HasSuffix(size, "cc") || strings.HasSuffix(size, "C") { + // DISK option not supported for cluster sizes ending in cc or C. + log.Printf("[WARN] disk option not supported for cluster size %s, disk is always enabled", size) + state.Disk = types.BoolValue(true) + } else if !state.Disk.IsNull() { + b.Disk(state.Disk.ValueBool()) + } + + if !state.AvailabilityZones.IsNull() && len(state.AvailabilityZones.Elements()) > 0 { + f := make([]string, len(state.AvailabilityZones.Elements())) + for i, elem := range state.AvailabilityZones.Elements() { + f[i] = elem.(types.String).ValueString() + } + b.AvailabilityZones(f) + } + + if !state.IntrospectionInterval.IsNull() { + b.IntrospectionInterval(state.IntrospectionInterval.ValueString()) + } + + if !state.IntrospectionDebugging.IsNull() && state.IntrospectionDebugging.ValueBool() { + b.IntrospectionDebugging() + } + + if !state.IdleArrangementMergeEffort.IsNull() { + b.IdleArrangementMergeEffort(int(state.IdleArrangementMergeEffort.ValueInt64())) + } + } + + // Create the resource. + if err := b.Create(); err != nil { + resp.Diagnostics.AddError("Failed to create the cluster", err.Error()) + return + } + + // Ownership. + if !state.OwnershipRole.IsNull() && state.OwnershipRole.ValueString() != "" { + ownership := materialize.NewOwnershipBuilder(metaDb, o) + + if err := ownership.Alter(state.OwnershipRole.ValueString()); err != nil { + log.Printf("[DEBUG] resource failed ownership, dropping object: %s", o.Name) + b.Drop() + resp.Diagnostics.AddError("Failed to set ownership", err.Error()) + return + } + } + + // Object comment.
+ if !state.Comment.IsNull() { + comment := materialize.NewCommentBuilder(metaDb, o) + + if err := comment.Object(state.Comment.ValueString()); err != nil { + log.Printf("[DEBUG] resource failed comment, dropping object: %s", o.Name) + b.Drop() + resp.Diagnostics.AddError("Failed to add comment", err.Error()) + return + } else { + state.Comment = types.StringValue(state.Comment.ValueString()) + } + } + + // Set ID. + i, err := materialize.ClusterId(metaDb, o) + if err != nil { + resp.Diagnostics.AddError("Failed to set resource ID", err.Error()) + return + } + + // After all operations are successful and you have the cluster ID: + clusterID := utils.TransformIdWithRegion(string(region), i) + + // Update the ID in the state + state.ID = types.StringValue(clusterID) + + // After the cluster is successfully created, read its current state + readState, _ := r.read(ctx, &state, false) + if resp.Diagnostics.HasError() { + return + } + + // Update the state with the freshly read information + diags = resp.State.Set(ctx, readState) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *clusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var state ClusterStateModelV0 + + // Retrieve the current state + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + updatedState, _ := r.read(ctx, &state, false) + + // Set the updated state in the response + diags = resp.State.Set(ctx, updatedState) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *clusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var plan ClusterStateModelV0 + var state ClusterStateModelV0 + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + diags = req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + metaDb, region, err := utils.NewGetDBClientFromMeta(r.client, state.Region.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Failed to get DB client", err.Error()) + return + } + state.Region = types.StringValue(string(region)) + + o := materialize.MaterializeObject{ObjectType: "CLUSTER", Name: state.Name.ValueString()} + b := materialize.NewClusterBuilder(metaDb, o) + + // Update cluster attributes if they have changed + if state.OwnershipRole.ValueString() != plan.OwnershipRole.ValueString() && plan.OwnershipRole.ValueString() != "" { + ownershipBuilder := materialize.NewOwnershipBuilder(metaDb, o) + if err := ownershipBuilder.Alter(plan.OwnershipRole.ValueString()); err != nil { + resp.Diagnostics.AddError("Failed to update ownership role", err.Error()) + return + } + } + + if state.Size.ValueString() != plan.Size.ValueString() { + if err := b.Resize(plan.Size.ValueString()); err != nil { + resp.Diagnostics.AddError("Failed to resize the cluster", err.Error()) + return + } + } + + // Handle changes in the 'disk' attribute + if state.Disk.ValueBool() != plan.Disk.ValueBool() { + if strings.HasSuffix(state.Size.ValueString(), "cc") || strings.HasSuffix(state.Size.ValueString(), "C") { + // DISK option not supported for cluster sizes ending in cc or C. 
+ log.Printf("[WARN] disk option not supported for cluster size %s, disk is always enabled", state.Size.ValueString()) + state.Disk = types.BoolValue(true) + } else { + if err := b.SetDisk(plan.Disk.ValueBool()); err != nil { + resp.Diagnostics.AddError("Failed to update disk setting", err.Error()) + return + } + } + } + + // Handle changes in the 'replication_factor' attribute + if state.ReplicationFactor.ValueInt64() != plan.ReplicationFactor.ValueInt64() { + if err := b.SetReplicationFactor(int(plan.ReplicationFactor.ValueInt64())); err != nil { + resp.Diagnostics.AddError("Failed to update replication factor", err.Error()) + return + } + } + + // Handle changes in the 'availability_zones' attribute + if !state.AvailabilityZones.Equal(plan.AvailabilityZones) && len(plan.AvailabilityZones.Elements()) > 0 { + azs := make([]string, len(plan.AvailabilityZones.Elements())) + for i, elem := range plan.AvailabilityZones.Elements() { + azs[i] = elem.(types.String).ValueString() + } + if err := b.SetAvailabilityZones(azs); err != nil { + resp.Diagnostics.AddError("Failed to update availability zones", err.Error()) + return + } + } + + // Handle changes in the 'introspection_interval' attribute + if state.IntrospectionInterval.ValueString() != plan.IntrospectionInterval.ValueString() { + if err := b.SetIntrospectionInterval(plan.IntrospectionInterval.ValueString()); err != nil { + resp.Diagnostics.AddError("Failed to update introspection interval", err.Error()) + return + } + } + + // Handle changes in the 'introspection_debugging' attribute + if state.IntrospectionDebugging.ValueBool() != plan.IntrospectionDebugging.ValueBool() { + if err := b.SetIntrospectionDebugging(plan.IntrospectionDebugging.ValueBool()); err != nil { + resp.Diagnostics.AddError("Failed to update introspection debugging", err.Error()) + return + } + } + + // Handle changes in the 'idle_arrangement_merge_effort' attribute + if state.IdleArrangementMergeEffort.ValueInt64() != plan.IdleArrangementMergeEffort.ValueInt64() { + if err := b.SetIdleArrangementMergeEffort(int(plan.IdleArrangementMergeEffort.ValueInt64())); err != nil { + resp.Diagnostics.AddError("Failed to update idle arrangement merge effort", err.Error()) + return + } + } + + // Handle changes in the 'comment' attribute + if !state.Comment.Equal(plan.Comment) { + commentBuilder := materialize.NewCommentBuilder(metaDb, o) + if err := commentBuilder.Object(plan.Comment.ValueString()); err != nil { + resp.Diagnostics.AddError("Failed to update comment", err.Error()) + return + } else { + // If the comment update was successful, reflect the change in the state + state.Comment = types.StringValue(plan.Comment.ValueString()) + } + } + + // After updating the cluster, read its current state + updatedState, _ := r.read(ctx, &plan, false) + // Update the state with the freshly read information + diags = resp.State.Set(ctx, updatedState) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *clusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Retrieve the current state + var state ClusterStateModelV0 + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + metaDb, _, err := utils.NewGetDBClientFromMeta(r.client, state.Region.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Failed to get DB client", err.Error()) + return + } + + o := materialize.MaterializeObject{ObjectType: "CLUSTER", Name: state.Name.ValueString()} + b := materialize.NewClusterBuilder(metaDb, o) + + // Drop the cluster + if err := b.Drop(); err != nil { + resp.Diagnostics.AddError("Failed to delete the cluster", err.Error()) + return + } + + // After successful deletion, clear the state by setting ID to empty + state.ID = types.String{} + diags = resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *clusterResource) read(ctx context.Context, data *ClusterStateModelV0, dryRun bool) (*ClusterStateModelV0, diag.Diagnostics) { + diags := diag.Diagnostics{} + + metaDb, region, err := utils.NewGetDBClientFromMeta(r.client, data.Region.ValueString()) + if err != nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to get DB client", + Detail: err.Error(), + }) + return data, diags + } + + clusterID := data.ID.ValueString() + clusterDetails, err := materialize.ScanCluster(metaDb, utils.ExtractId(clusterID)) + if err != nil { + if err == sql.ErrNoRows { + data.ID = types.String{} + data.Name = types.String{} + data.Size = types.String{} + data.ReplicationFactor = types.Int64{} + data.Disk = types.Bool{} + data.AvailabilityZones = types.List{} + data.IntrospectionInterval = types.String{} + data.IntrospectionDebugging = types.Bool{} + data.IdleArrangementMergeEffort = types.Int64{} + data.OwnershipRole = types.String{} + data.Comment = types.String{} + } else { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Failed to read the cluster", + Detail: err.Error(), + }) + } + return data, diags + } + + // Set the values from clusterDetails to data, ensuring that nulls and empty strings are treated consistently. + data.ID = types.StringValue(clusterID) + data.Name = types.StringValue(getNullString(clusterDetails.ClusterName)) + data.ReplicationFactor = types.Int64Value(clusterDetails.ReplicationFactor.Int64) + data.Disk = types.BoolValue(clusterDetails.Disk.Bool) + data.OwnershipRole = types.StringValue(getNullString(clusterDetails.OwnerName)) + + // Normalize empty strings to nulls for size and comment to match the old behavior + data.Size = normalizeStringToNull(clusterDetails.Size) + if clusterDetails.Comment.Valid && clusterDetails.Comment.String != "" { + data.Comment = types.StringValue(clusterDetails.Comment.String) + } else { + data.Comment = types.StringNull() + } + + regionStr := string(region) + data.Region = types.StringValue(regionStr) + + // Availability Zones + azValues := make([]attr.Value, len(clusterDetails.AvailabilityZones)) + for i, az := range clusterDetails.AvailabilityZones { + azValues[i] = types.StringValue(az) + } + data.AvailabilityZones, _ = types.ListValue(types.StringType, azValues) + + return data, diags +} + +// getNullString checks if the sql.NullString is valid and returns the string or an empty string if not. +func getNullString(ns sql.NullString) string { + if ns.Valid { + return ns.String + } + return "" +} + +// normalizeStringToNull converts an empty string or a valid null string to a Terraform null type. 
+func normalizeStringToNull(str sql.NullString) types.String { + if !str.Valid || str.String == "" { + return types.StringNull() + } + return types.StringValue(str.String) +} diff --git a/pkg/resources/schema_new.go b/pkg/resources/schema_new.go new file mode 100644 index 00000000..8388491a --- /dev/null +++ b/pkg/resources/schema_new.go @@ -0,0 +1,179 @@ +package resources + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework-validators/boolvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func NewObjectNameSchema(resource string, required, forceNew bool) schema.StringAttribute { + attr := schema.StringAttribute{ + Description: fmt.Sprintf("The identifier for the %s.", resource), + Required: required, + Optional: !required, + } + if forceNew { + attr.PlanModifiers = []planmodifier.String{stringplanmodifier.RequiresReplace()} + } + return attr +} + +func NewCommentSchema(forceNew bool) schema.StringAttribute { + attr := schema.StringAttribute{ + Description: "**Public Preview** Comment on an object in the database.", + Optional: true, + } + if forceNew { + attr.PlanModifiers = []planmodifier.String{stringplanmodifier.RequiresReplace()} + } + return attr +} + +func NewOwnershipRoleSchema() schema.StringAttribute { + return schema.StringAttribute{ + Description: "The ownership role of the object.", + Optional: true, + Computed: true, + } +} + +func NewSizeSchema(resource string, required bool, forceNew bool, alsoRequires []string) schema.StringAttribute { + expressions := make([]path.Expression, len(alsoRequires)) + for i, req := range alsoRequires { + expressions[i] = path.MatchRoot(req) + } + + attr := schema.StringAttribute{ + Description: fmt.Sprintf("The size of the %s.", resource), + Required: required, + Optional: !required, + Validators: []validator.String{ + stringvalidator.OneOf(replicaSizes...), + stringvalidator.AlsoRequires(expressions...), + }, + } + if forceNew { + attr.PlanModifiers = []planmodifier.String{stringplanmodifier.RequiresReplace()} + } + return attr +} + +func NewDiskSchema(forceNew bool) schema.BoolAttribute { + attr := schema.BoolAttribute{ + Description: "**Deprecated**. This attribute is maintained for backward compatibility with existing configurations. New users should use 'cc' sizes for disk access. Disk replicas are deprecated and will be removed in a future release. 
The `disk` attribute will be enabled by default for 'cc' clusters", + Optional: true, + Computed: true, + } + if forceNew { + attr.PlanModifiers = []planmodifier.Bool{boolplanmodifier.RequiresReplace()} + } + return attr +} + +func NewIntrospectionIntervalSchema(forceNew bool, alsoRequires []string) schema.StringAttribute { + expressions := make([]path.Expression, len(alsoRequires)) + for i, req := range alsoRequires { + expressions[i] = path.MatchRoot(req) + } + + attr := schema.StringAttribute{ + Description: "The interval at which to collect introspection data.", + Optional: true, + Computed: true, + Default: stringdefault.StaticString("1s"), + Validators: []validator.String{ + stringvalidator.AlsoRequires(expressions...), + }, + } + if forceNew { + attr.PlanModifiers = []planmodifier.String{stringplanmodifier.RequiresReplace()} + } + return attr +} + +func NewIntrospectionDebuggingSchema(forceNew bool, alsoRequires []string) schema.BoolAttribute { + expressions := make([]path.Expression, len(alsoRequires)) + for i, req := range alsoRequires { + expressions[i] = path.MatchRoot(req) + } + + attr := schema.BoolAttribute{ + Description: "Whether to introspect the gathering of the introspection data.", + Optional: true, + Computed: true, + Default: booldefault.StaticBool(false), + Validators: []validator.Bool{ + boolvalidator.AlsoRequires(expressions...), + }, + } + if forceNew { + attr.PlanModifiers = []planmodifier.Bool{boolplanmodifier.RequiresReplace()} + } + return attr +} + +func NewIdleArrangementMergeEffortSchema(forceNew bool, alsoRequires []string) schema.Int64Attribute { + expressions := make([]path.Expression, len(alsoRequires)) + for i, req := range alsoRequires { + expressions[i] = path.MatchRoot(req) + } + + attr := schema.Int64Attribute{ + Description: "The amount of effort to exert compacting arrangements during idle periods. This is an unstable option! It may be changed or removed at any time.", + Optional: true, + Validators: []validator.Int64{ + int64validator.AlsoRequires(expressions...), + }, + } + if forceNew { + attr.PlanModifiers = []planmodifier.Int64{int64planmodifier.RequiresReplace()} + } + return attr +} + +func NewRegionSchema() schema.StringAttribute { + return schema.StringAttribute{ + Description: "The region to use for the resource connection. 
If not set, the default region is used.", + Optional: true, + Computed: true, + // PlanModifiers: []planmodifier.String{ + // stringplanmodifier.RequiresReplace(), + // }, + } +} + +func NewReplicationFactorSchema() schema.Int64Attribute { + return schema.Int64Attribute{ + Description: "The number of replicas of each dataflow-powered object to maintain.", + Optional: true, + Computed: true, + Validators: []validator.Int64{ + int64validator.AlsoRequires(path.MatchRoot("size")), + }, + } +} + +func NewAvailabilityZonesSchema() schema.ListAttribute { + return schema.ListAttribute{ + Description: "The specific availability zones of the cluster.", + Optional: true, + Computed: true, + ElementType: types.StringType, + Validators: []validator.List{ + listvalidator.AlsoRequires(path.MatchRoot("size")), + }, + } +} diff --git a/pkg/utils/provider_meta.go b/pkg/utils/provider_meta.go index 7f2cb200..ba864187 100644 --- a/pkg/utils/provider_meta.go +++ b/pkg/utils/provider_meta.go @@ -2,6 +2,7 @@ package utils import ( "fmt" + "log" "strings" "github.com/MaterializeInc/terraform-provider-materialize/pkg/clients" @@ -34,6 +35,10 @@ type ProviderMeta struct { RegionsEnabled map[clients.Region]bool } +type ProviderData struct { + Client *ProviderMeta +} + var DefaultRegion string func GetProviderMeta(meta interface{}) (*ProviderMeta, error) { @@ -49,6 +54,19 @@ func GetProviderMeta(meta interface{}) (*ProviderMeta, error) { return providerMeta, nil } +func NewGetProviderMeta(meta interface{}) (*ProviderData, error) { + providerData := meta.(*ProviderData) + + if err := providerData.Client.Frontegg.NeedsTokenRefresh(); err != nil { + err := providerData.Client.Frontegg.RefreshToken() + if err != nil { + return nil, fmt.Errorf("failed to refresh token: %v", err) + } + } + + return providerData, nil +} + func GetDBClientFromMeta(meta interface{}, d *schema.ResourceData) (*sqlx.DB, clients.Region, error) { providerMeta, err := GetProviderMeta(meta) if err != nil { @@ -87,6 +105,47 @@ func GetDBClientFromMeta(meta interface{}, d *schema.ResourceData) (*sqlx.DB, cl return dbClient.SQLX(), region, nil } +func NewGetDBClientFromMeta(meta interface{}, regionString string) (*sqlx.DB, clients.Region, error) { + + log.Printf("[DEBUG] Received provider meta of type %T: %+v\n", meta, meta) + + providerData, err := NewGetProviderMeta(meta) + if err != nil { + return nil, "", err + } + + // Determine the region to use, if one is not specified, use the default region + var region clients.Region + if regionString != "" { + region = clients.Region(regionString) + } else { + region = providerData.Client.DefaultRegion + } + + // Check if the region is enabled using the stored information + enabled, exists := providerData.Client.RegionsEnabled[region] + if !exists { + var regions []string + for regionKey := range providerData.Client.RegionsEnabled { + regions = append(regions, string(regionKey)) + } + enabledRegions := strings.Join(regions, ", ") + return nil, region, fmt.Errorf("region not found: '%s'. Currently enabled regions: %s", region, enabledRegions) + } + + if !enabled { + return nil, region, fmt.Errorf("region '%s' is not enabled", region) + } + + // Retrieve the appropriate DBClient for the region from the map + dbClient, exists := providerData.Client.DB[region] + if !exists { + return nil, region, fmt.Errorf("no database client for region: %s", region) + } + + return dbClient.SQLX(), region, nil +} + func SetDefaultRegion(region string) error { DefaultRegion = region return nil
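
For reference, a minimal configuration exercising the new framework-based resource registered in this change might look like the following. This is an illustrative sketch only: the resource type `materialize_cluster_2` and its attribute names come from the schema above, while the concrete values (cluster name, size, region) are placeholder assumptions rather than documented defaults.

```terraform
# Hypothetical usage of the new materialize_cluster_2 resource (Plugin Framework).
# "25cc" is an assumed example size; it must be one of the provider's accepted replica sizes.
resource "materialize_cluster_2" "example" {
  name               = "example_cluster"
  size               = "25cc"
  replication_factor = 2
  region             = "aws/us-east-1"
}
```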