diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 0f80cd020b..16bce146ab 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -8,6 +8,7 @@ ### New Features and Improvements +* Add `provider_config` support for SDKv2-compatible plugin framework resources and data sources ([#5115](https://github.com/databricks/terraform-provider-databricks/pull/5115)) * Optimize `databricks_grant` and `databricks_grants` to not call the `Update` API if the requested permissions are already granted ([#5095](https://github.com/databricks/terraform-provider-databricks/pull/5095)) * Added `expected_workspace_status` to `databricks_mws_workspaces` to support creating workspaces in provisioning status ([#5019](https://github.com/databricks/terraform-provider-databricks/pull/5019)) diff --git a/docs/resources/library.md b/docs/resources/library.md index d13b23f159..f1d601474b 100644 --- a/docs/resources/library.md +++ b/docs/resources/library.md @@ -127,6 +127,38 @@ resource "databricks_library" "rkeops" { } ``` +## Argument Reference + +The following arguments are supported: + +* `cluster_id` - (Required) ID of the [databricks_cluster](cluster.md) to install the library on. + +You must specify exactly **one** of the following library types: + +* `jar` - (Optional) Path to the JAR library. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs. For example: `/Workspace/path/to/library.jar`, `/Volumes/path/to/library.jar` or `s3://my-bucket/library.jar`. If S3 is used, make sure the cluster has read access to the library. You may need to launch the cluster with an IAM role to access the S3 URI. + +* `egg` - (Optional, Deprecated) Path to the EGG library. Installing Python egg files is deprecated and is not supported in Databricks Runtime 14.0 and above. Use `whl` or `pypi` instead. + +* `whl` - (Optional) Path to the wheel library. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs. For example: `/Workspace/path/to/library.whl`, `/Volumes/path/to/library.whl` or `s3://my-bucket/library.whl`. If S3 is used, make sure the cluster has read access to the library. You may need to launch the cluster with an IAM role to access the S3 URI. + +* `requirements` - (Optional) Path to the requirements.txt file. Only Workspace paths and Unity Catalog Volumes paths are supported. For example: `/Workspace/path/to/requirements.txt` or `/Volumes/path/to/requirements.txt`. Requires a cluster with DBR 15.0+. + +* `maven` - (Optional) Configuration block for a Maven library. The block consists of the following fields: + * `coordinates` - (Required) Gradle-style Maven coordinates. For example: `org.jsoup:jsoup:1.7.2`. + * `repo` - (Optional) Maven repository to install the Maven package from. If omitted, both Maven Central Repository and Spark Packages are searched. + * `exclusions` - (Optional) List of dependencies to exclude. For example: `["slf4j:slf4j", "*:hadoop-client"]`. See [Maven dependency exclusions](https://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html) for more information. + +* `pypi` - (Optional) Configuration block for a PyPI library. The block consists of the following fields: + * `package` - (Required) The name of the PyPI package to install. An optional exact version specification is also supported. For example: `simplejson` or `simplejson==3.8.0`. + * `repo` - (Optional) The repository where the package can be found. If not specified, the default pip index is used.
+ +* `cran` - (Optional) Configuration block for a CRAN library. The block consists of the following fields: + * `package` - (Required) The name of the CRAN package to install. + * `repo` - (Optional) The repository where the package can be found. If not specified, the default CRAN repo is used. + +* `provider_config` - (Optional) Configuration block for management through the account provider. This block consists of the following fields: + * `workspace_id` - (Required) Workspace ID that the resource belongs to. This workspace must be part of the account that the provider is configured with. + ## Import !> Importing this resource is not currently supported. diff --git a/docs/resources/quality_monitor.md b/docs/resources/quality_monitor.md index 7ddd8ac866..c067a4cdf0 100644 --- a/docs/resources/quality_monitor.md +++ b/docs/resources/quality_monitor.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_quality_monitor Resource -This resource allows you to manage [Lakehouse Monitors](https://docs.databricks.com/en/lakehouse-monitoring/index.html) in Databricks. +This resource allows you to manage [Lakehouse Monitors](https://docs.databricks.com/en/lakehouse-monitoring/index.html) in Databricks. -> This resource can only be used with a workspace-level provider! @@ -120,6 +120,8 @@ table. * `skip_builtin_dashboard` - Whether to skip creating a default dashboard summarizing data quality metrics. (Can't be updated after creation). * `slicing_exprs` - List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices. * `warehouse_id` - Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. (Can't be updated after creation) +* `provider_config` - (Optional) Configuration block for management through the account provider. This block consists of the following fields: + * `workspace_id` - (Required) Workspace ID that the resource belongs to. This workspace must be part of the account that the provider is configured with. ## Attribute Reference @@ -129,7 +131,7 @@ In addition to all arguments above, the following attributes are exported: * `monitor_version` - The version of the monitor config (e.g. 1,2,3). If negative, the monitor may be corrupted * `drift_metrics_table_name` - The full name of the drift metrics table. Format: __catalog_name__.__schema_name__.__table_name__. * `profile_metrics_table_name` - The full name of the profile metrics table. Format: __catalog_name__.__schema_name__.__table_name__. -* `status` - Status of the Monitor +* `status` - Status of the Monitor * `dashboard_id` - The ID of the generated dashboard. ## Related Resources diff --git a/docs/resources/share.md b/docs/resources/share.md index 45a9d5f37b..94a0f9a123 100644 --- a/docs/resources/share.md +++ b/docs/resources/share.md @@ -85,6 +85,8 @@ The following arguments are required: * `name` - (Required) Name of share. Change forces creation of a new resource. * `owner` - (Optional) User name/group name/sp application_id of the share owner. * `comment` - (Optional) User-supplied free-form text. +* `provider_config` - (Optional) Configuration block for management through the account provider. This block consists of the following fields: + * `workspace_id` - (Required) Workspace ID that the resource belongs to.
This workspace must be part of the account that the provider is configured with. ### object Configuration Block diff --git a/internal/providers/pluginfw/products/library/resource_library.go b/internal/providers/pluginfw/products/library/resource_library.go index ea2aefecd8..21e0ac56ee 100644 --- a/internal/providers/pluginfw/products/library/resource_library.go +++ b/internal/providers/pluginfw/products/library/resource_library.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "reflect" "time" "github.com/databricks/databricks-sdk-go/apierr" @@ -16,6 +17,7 @@ import ( "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" "github.com/databricks/terraform-provider-databricks/internal/service/compute_tf" "github.com/databricks/terraform-provider-databricks/libraries" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" @@ -76,6 +78,13 @@ type LibraryExtended struct { compute_tf.Library_SdkV2 ClusterId types.String `tfsdk:"cluster_id"` ID types.String `tfsdk:"id"` // Adding ID field to stay compatible with SDKv2 + tfschema.Namespace_SdkV2 +} + +func (l LibraryExtended) GetComplexFieldTypes(ctx context.Context) map[string]reflect.Type { + attrs := l.Library_SdkV2.GetComplexFieldTypes(ctx) + attrs["provider_config"] = reflect.TypeOf(tfschema.ProviderConfig{}) + return attrs } type LibraryResource struct { @@ -107,6 +116,7 @@ func (r *LibraryResource) Schema(ctx context.Context, req resource.SchemaRequest c.SetOptional("id") c.SetComputed("id") c.SetDeprecated(clusters.EggDeprecationWarning, "egg") + c.AddValidator(listvalidator.SizeAtMost(1), "provider_config") return c }) resp.Schema = schema.Schema{ @@ -124,13 +134,20 @@ func (r *LibraryResource) Configure(ctx context.Context, req resource.ConfigureR func (r *LibraryResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) - w, diags := r.Client.GetWorkspaceClient() + var libraryTfSDK LibraryExtended + resp.Diagnostics.Append(req.Plan.Get(ctx, &libraryTfSDK)...) + if resp.Diagnostics.HasError() { + return + } + + workspaceID, diags := tfschema.GetWorkspaceID_SdkV2(ctx, libraryTfSDK.ProviderConfig) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } - var libraryTfSDK LibraryExtended - resp.Diagnostics.Append(req.Plan.Get(ctx, &libraryTfSDK)...) + + w, diags := r.Client.GetWorkspaceClientForUnifiedProviderWithDiagnostics(ctx, workspaceID) + resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } @@ -171,21 +188,30 @@ func (r *LibraryResource) Create(ctx context.Context, req resource.CreateRequest } installedLib.ID = types.StringValue(libGoSDK.String()) + installedLib.ProviderConfig = libraryTfSDK.ProviderConfig resp.Diagnostics.Append(resp.State.Set(ctx, installedLib)...) } func (r *LibraryResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) - w, diags := r.Client.GetWorkspaceClient() + var libraryTfSDK LibraryExtended + resp.Diagnostics.Append(req.State.Get(ctx, &libraryTfSDK)...) + if resp.Diagnostics.HasError() { + return + } + + workspaceID, diags := tfschema.GetWorkspaceID_SdkV2(ctx, libraryTfSDK.ProviderConfig) resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() { return } - var libraryTfSDK LibraryExtended - resp.Diagnostics.Append(req.State.Get(ctx, &libraryTfSDK)...) + + w, diags := r.Client.GetWorkspaceClientForUnifiedProviderWithDiagnostics(ctx, workspaceID) + resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } + var libGoSDK compute.Library resp.Diagnostics.Append(converters.TfSdkToGoSdkStruct(ctx, libraryTfSDK, &libGoSDK)...) if resp.Diagnostics.HasError() { @@ -209,6 +235,7 @@ func (r *LibraryResource) Read(ctx context.Context, req resource.ReadRequest, re return } + installedLib.ProviderConfig = libraryTfSDK.ProviderConfig resp.Diagnostics.Append(resp.State.Set(ctx, installedLib)...) } @@ -218,16 +245,24 @@ func (r *LibraryResource) Update(ctx context.Context, req resource.UpdateRequest func (r *LibraryResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) - w, diags := r.Client.GetWorkspaceClient() + var libraryTfSDK LibraryExtended + resp.Diagnostics.Append(req.State.Get(ctx, &libraryTfSDK)...) + if resp.Diagnostics.HasError() { + return + } + + workspaceID, diags := tfschema.GetWorkspaceID_SdkV2(ctx, libraryTfSDK.ProviderConfig) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } - var libraryTfSDK LibraryExtended - resp.Diagnostics.Append(req.State.Get(ctx, &libraryTfSDK)...) + + w, diags := r.Client.GetWorkspaceClientForUnifiedProviderWithDiagnostics(ctx, workspaceID) + resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } + clusterID := libraryTfSDK.ClusterId.ValueString() var libGoSDK compute.Library resp.Diagnostics.Append(converters.TfSdkToGoSdkStruct(ctx, libraryTfSDK, &libGoSDK)...) 
diff --git a/internal/providers/pluginfw/products/qualitymonitor/resource_quality_monitor.go b/internal/providers/pluginfw/products/qualitymonitor/resource_quality_monitor.go index 99849c4439..92c8b4011b 100644 --- a/internal/providers/pluginfw/products/qualitymonitor/resource_quality_monitor.go +++ b/internal/providers/pluginfw/products/qualitymonitor/resource_quality_monitor.go @@ -16,6 +16,7 @@ import ( "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" "github.com/databricks/terraform-provider-databricks/internal/service/catalog_tf" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" @@ -62,12 +63,15 @@ type MonitorInfoExtended struct { WarehouseId types.String `tfsdk:"warehouse_id"` SkipBuiltinDashboard types.Bool `tfsdk:"skip_builtin_dashboard"` ID types.String `tfsdk:"id"` // Adding ID field to stay compatible with SDKv2 + tfschema.Namespace_SdkV2 } var _ pluginfwcommon.ComplexFieldTypeProvider = MonitorInfoExtended{} func (m MonitorInfoExtended) GetComplexFieldTypes(ctx context.Context) map[string]reflect.Type { - return m.MonitorInfo_SdkV2.GetComplexFieldTypes(ctx) + attrs := m.MonitorInfo_SdkV2.GetComplexFieldTypes(ctx) + attrs["provider_config"] = reflect.TypeOf(tfschema.ProviderConfig{}) + return attrs } type QualityMonitorResource struct { @@ -92,6 +96,7 @@ func (r *QualityMonitorResource) Schema(ctx context.Context, req resource.Schema c.SetOptional("skip_builtin_dashboard") c.SetComputed("id") c.SetOptional("id") + c.AddValidator(listvalidator.SizeAtMost(1), "provider_config") return c }) resp.Schema = schema.Schema{ @@ -113,13 +118,20 @@ func (d *QualityMonitorResource) ImportState(ctx context.Context, req resource.I func (r *QualityMonitorResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) - w, diags := r.Client.GetWorkspaceClient() + var monitorInfoTfSDK MonitorInfoExtended + resp.Diagnostics.Append(req.Plan.Get(ctx, &monitorInfoTfSDK)...) + if resp.Diagnostics.HasError() { + return + } + + workspaceID, diags := tfschema.GetWorkspaceID_SdkV2(ctx, monitorInfoTfSDK.ProviderConfig) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } - var monitorInfoTfSDK MonitorInfoExtended - resp.Diagnostics.Append(req.Plan.Get(ctx, &monitorInfoTfSDK)...) + + w, diags := r.Client.GetWorkspaceClientForUnifiedProviderWithDiagnostics(ctx, workspaceID) + resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } @@ -150,25 +162,34 @@ func (r *QualityMonitorResource) Create(ctx context.Context, req resource.Create // We need it to fill additional fields as they are not returned by the API newMonitorInfoTfSDK.WarehouseId = monitorInfoTfSDK.WarehouseId newMonitorInfoTfSDK.SkipBuiltinDashboard = monitorInfoTfSDK.SkipBuiltinDashboard + newMonitorInfoTfSDK.ProviderConfig = monitorInfoTfSDK.ProviderConfig resp.Diagnostics.Append(resp.State.Set(ctx, newMonitorInfoTfSDK)...) 
} func (r *QualityMonitorResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) - w, diags := r.Client.GetWorkspaceClient() + + var monitorInfoTfSDK MonitorInfoExtended + resp.Diagnostics.Append(req.State.Get(ctx, &monitorInfoTfSDK)...) + if resp.Diagnostics.HasError() { + return + } + + workspaceID, diags := tfschema.GetWorkspaceID_SdkV2(ctx, monitorInfoTfSDK.ProviderConfig) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } - var getMonitor catalog_tf.GetQualityMonitorRequest - resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("table_name"), &getMonitor.TableName)...) + w, diags := r.Client.GetWorkspaceClientForUnifiedProviderWithDiagnostics(ctx, workspaceID) + resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } + endpoint, err := w.QualityMonitors.Get(ctx, catalog.GetQualityMonitorRequest{ - TableName: getMonitor.TableName.ValueString(), + TableName: monitorInfoTfSDK.TableName.ValueString(), }) if err != nil { if apierr.IsMissing(err) { @@ -178,38 +199,26 @@ func (r *QualityMonitorResource) Read(ctx context.Context, req resource.ReadRequ resp.Diagnostics.AddError("failed to get monitor", err.Error()) return } - var monitorInfoTfSDK MonitorInfoExtended - resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, endpoint, &monitorInfoTfSDK)...) + var newMonitorInfoTfSDK MonitorInfoExtended + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, endpoint, &newMonitorInfoTfSDK)...) if resp.Diagnostics.HasError() { return } - monitorInfoTfSDK.ID = monitorInfoTfSDK.TableName - // We need it to fill additional fields as they are not returned by the API - var origWarehouseId types.String - var origSkipBuiltinDashboard types.Bool - resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("warehouse_id"), &origWarehouseId)...) - resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("skip_builtin_dashboard"), &origSkipBuiltinDashboard)...) - if resp.Diagnostics.HasError() { - return - } - if origWarehouseId.ValueString() != "" { - monitorInfoTfSDK.WarehouseId = origWarehouseId + newMonitorInfoTfSDK.ID = monitorInfoTfSDK.TableName + if monitorInfoTfSDK.WarehouseId.ValueString() != "" { + newMonitorInfoTfSDK.WarehouseId = monitorInfoTfSDK.WarehouseId } - if origSkipBuiltinDashboard.ValueBool() { - monitorInfoTfSDK.SkipBuiltinDashboard = origSkipBuiltinDashboard + if monitorInfoTfSDK.SkipBuiltinDashboard.ValueBool() { + newMonitorInfoTfSDK.SkipBuiltinDashboard = monitorInfoTfSDK.SkipBuiltinDashboard } - resp.Diagnostics.Append(resp.State.Set(ctx, monitorInfoTfSDK)...) + newMonitorInfoTfSDK.ProviderConfig = monitorInfoTfSDK.ProviderConfig + resp.Diagnostics.Append(resp.State.Set(ctx, newMonitorInfoTfSDK)...) } func (r *QualityMonitorResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) - w, diags := r.Client.GetWorkspaceClient() - resp.Diagnostics.Append(diags...) - if resp.Diagnostics.HasError() { - return - } var monitorInfoTfSDK MonitorInfoExtended resp.Diagnostics.Append(req.Plan.Get(ctx, &monitorInfoTfSDK)...) 
@@ -230,6 +239,19 @@ func (r *QualityMonitorResource) Update(ctx context.Context, req resource.Update if updateMonitorGoSDK.Schedule != nil { updateMonitorGoSDK.Schedule.PauseStatus = "" } + + workspaceID, diags := tfschema.GetWorkspaceID_SdkV2(ctx, monitorInfoTfSDK.ProviderConfig) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + w, diags := r.Client.GetWorkspaceClientForUnifiedProviderWithDiagnostics(ctx, workspaceID) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + monitor, err := w.QualityMonitors.Update(ctx, updateMonitorGoSDK) if err != nil { resp.Diagnostics.AddError("failed to update monitor", err.Error()) @@ -252,24 +274,33 @@ func (r *QualityMonitorResource) Update(ctx context.Context, req resource.Update return } + newMonitorInfoTfSDK.ProviderConfig = monitorInfoTfSDK.ProviderConfig resp.Diagnostics.Append(resp.State.Set(ctx, newMonitorInfoTfSDK)...) } func (r *QualityMonitorResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) - w, diags := r.Client.GetWorkspaceClient() + + var monitorInfoTfSDK MonitorInfoExtended + resp.Diagnostics.Append(req.State.Get(ctx, &monitorInfoTfSDK)...) + if resp.Diagnostics.HasError() { + return + } + + workspaceID, diags := tfschema.GetWorkspaceID_SdkV2(ctx, monitorInfoTfSDK.ProviderConfig) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } - var deleteRequest catalog_tf.DeleteQualityMonitorRequest - resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("table_name"), &deleteRequest.TableName)...) + w, diags := r.Client.GetWorkspaceClientForUnifiedProviderWithDiagnostics(ctx, workspaceID) + resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { return } + _, err := w.QualityMonitors.Delete(ctx, catalog.DeleteQualityMonitorRequest{ - TableName: deleteRequest.TableName.ValueString(), + TableName: monitorInfoTfSDK.TableName.ValueString(), }) if err != nil && !apierr.IsMissing(err) { resp.Diagnostics.AddError("failed to delete monitor", err.Error()) diff --git a/internal/providers/pluginfw/products/sharing/resource_share.go b/internal/providers/pluginfw/products/sharing/resource_share.go index 672e758bed..14f88aef87 100644 --- a/internal/providers/pluginfw/products/sharing/resource_share.go +++ b/internal/providers/pluginfw/products/sharing/resource_share.go @@ -13,6 +13,7 @@ import ( "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" "github.com/databricks/terraform-provider-databricks/internal/service/sharing_tf" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" @@ -32,13 +33,16 @@ func ResourceShare() resource.Resource { type ShareInfoExtended struct { sharing_tf.ShareInfo_SdkV2 + tfschema.Namespace_SdkV2 ID types.String `tfsdk:"id"` // Adding ID field to stay compatible with SDKv2 } var _ pluginfwcommon.ComplexFieldTypeProvider = ShareInfoExtended{} func (s ShareInfoExtended) GetComplexFieldTypes(ctx context.Context) map[string]reflect.Type { - return s.ShareInfo_SdkV2.GetComplexFieldTypes(ctx) + types := s.ShareInfo_SdkV2.GetComplexFieldTypes(ctx) + types["provider_config"] = reflect.TypeOf(tfschema.ProviderConfig{}) + return types } func matchOrder[T any, K comparable](target, reference []T, keyFunc func(T) K) { @@ -160,6 +164,9 @@ func (r *ShareResource) Schema(ctx context.Context, req resource.SchemaRequest, c.SetComputed("id") + // Ensure provider_config list has at most 1 element + c.AddValidator(listvalidator.SizeAtMost(1), "provider_config") + return c }) resp.Schema = schema.Schema{ @@ -177,11 +184,7 @@ func (d *ShareResource) Configure(ctx context.Context, req resource.ConfigureReq func (r *ShareResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) - w, diags := r.Client.GetWorkspaceClient() - resp.Diagnostics.Append(diags...) - if resp.Diagnostics.HasError() { - return - } + var plan ShareInfoExtended resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) if resp.Diagnostics.HasError() { @@ -199,6 +202,19 @@ func (r *ShareResource) Create(ctx context.Context, req resource.CreateRequest, if resp.Diagnostics.HasError() { return } + + workspaceID, diags := tfschema.GetWorkspaceID_SdkV2(ctx, plan.ProviderConfig) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + w, clientDiags := r.Client.GetWorkspaceClientForUnifiedProviderWithDiagnostics(ctx, workspaceID) + resp.Diagnostics.Append(clientDiags...) 
+ if resp.Diagnostics.HasError() { + return + } + shareInfo, err := w.Shares.Create(ctx, createShare) if err != nil { resp.Diagnostics.AddError("failed to create share", err.Error()) @@ -252,15 +268,21 @@ func (r *ShareResource) Read(ctx context.Context, req resource.ReadRequest, resp return } - w, diags := r.Client.GetWorkspaceClient() + var getShareRequest sharing.GetShareRequest + getShareRequest.IncludeSharedData = true + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("name"), &getShareRequest.Name)...) + if resp.Diagnostics.HasError() { + return + } + + workspaceID, diags := tfschema.GetWorkspaceID_SdkV2(ctx, existingState.ProviderConfig) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } - var getShareRequest sharing.GetShareRequest - getShareRequest.IncludeSharedData = true - resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("name"), &getShareRequest.Name)...) + w, clientDiags := r.Client.GetWorkspaceClientForUnifiedProviderWithDiagnostics(ctx, workspaceID) + resp.Diagnostics.Append(clientDiags...) if resp.Diagnostics.HasError() { return } @@ -302,12 +324,6 @@ func (r *ShareResource) Update(ctx context.Context, req resource.UpdateRequest, return } - client, diags := r.Client.GetWorkspaceClient() - resp.Diagnostics.Append(diags...) - if resp.Diagnostics.HasError() { - return - } - var plan ShareInfoExtended resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) if resp.Diagnostics.HasError() { @@ -324,7 +340,18 @@ func (r *ShareResource) Update(ctx context.Context, req resource.UpdateRequest, getShareRequest.Name = state.Name.ValueString() getShareRequest.IncludeSharedData = true - currentShareInfo, err := client.Shares.Get(ctx, getShareRequest) + workspaceID, diags := tfschema.GetWorkspaceID_SdkV2(ctx, plan.ProviderConfig) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + w, clientDiags := r.Client.GetWorkspaceClientForUnifiedProviderWithDiagnostics(ctx, workspaceID) + resp.Diagnostics.Append(clientDiags...) 
+ if resp.Diagnostics.HasError() { + return + } + currentShareInfo, err := w.Shares.Get(ctx, getShareRequest) if err != nil { resp.Diagnostics.AddError("failed to get current share info", err.Error()) return @@ -337,7 +364,7 @@ func (r *ShareResource) Update(ctx context.Context, req resource.UpdateRequest, // if owner has changed, update the share owner if !plan.Owner.IsNull() { - updatedShareInfo, err := client.Shares.Update(ctx, sharing.UpdateShare{ + updatedShareInfo, err := w.Shares.Update(ctx, sharing.UpdateShare{ Name: state.Name.ValueString(), Owner: plan.Owner.ValueString(), }) @@ -362,12 +389,12 @@ func (r *ShareResource) Update(ctx context.Context, req resource.UpdateRequest, if !plan.Comment.IsNull() { update.Comment = plan.Comment.ValueString() } - upToDateShareInfo, err = client.Shares.Update(ctx, update) + upToDateShareInfo, err = w.Shares.Update(ctx, update) if err != nil { resp.Diagnostics.AddError("failed to update share", err.Error()) - rollbackShareInfo, rollbackErr := client.Shares.Update(ctx, sharing.UpdateShare{ + rollbackShareInfo, rollbackErr := w.Shares.Update(ctx, sharing.UpdateShare{ Name: currentShareInfo.Name, Owner: currentShareInfo.Owner, }) @@ -402,8 +429,8 @@ func (r *ShareResource) Update(ctx context.Context, req resource.UpdateRequest, func (r *ShareResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) - w, diags := r.Client.GetWorkspaceClient() - resp.Diagnostics.Append(diags...) + var state ShareInfoExtended + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) if resp.Diagnostics.HasError() { return } @@ -413,6 +440,18 @@ func (r *ShareResource) Delete(ctx context.Context, req resource.DeleteRequest, if resp.Diagnostics.HasError() { return } + + workspaceID, diags := tfschema.GetWorkspaceID_SdkV2(ctx, state.ProviderConfig) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + w, clientDiags := r.Client.GetWorkspaceClientForUnifiedProviderWithDiagnostics(ctx, workspaceID) + resp.Diagnostics.Append(clientDiags...) 
+ if resp.Diagnostics.HasError() { + return + } err := w.Shares.DeleteByName(ctx, deleteShareRequest.Name.ValueString()) if err != nil && !apierr.IsMissing(err) { resp.Diagnostics.AddError("failed to delete share", err.Error()) @@ -473,5 +512,6 @@ func (r *ShareResource) syncEffectiveFields(ctx context.Context, existingState, finalObjects = append(finalObjects, newStateObjects[i]) } newState.SetObjects(ctx, finalObjects) + newState.ProviderConfig = existingState.ProviderConfig // Preserve provider_config from existing state return newState, d } diff --git a/internal/providers/pluginfw/products/sharing/resource_share_acc_test.go b/internal/providers/pluginfw/products/sharing/resource_share_acc_test.go index 1a5054c585..e82d65f51a 100644 --- a/internal/providers/pluginfw/products/sharing/resource_share_acc_test.go +++ b/internal/providers/pluginfw/products/sharing/resource_share_acc_test.go @@ -4,6 +4,8 @@ import ( "context" "fmt" "maps" + "regexp" + "strconv" "testing" "github.com/databricks/databricks-sdk-go" @@ -11,6 +13,8 @@ import ( "github.com/databricks/terraform-provider-databricks/internal/acceptance" "github.com/databricks/terraform-provider-databricks/internal/providers" "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -609,3 +613,170 @@ func TestUcAccUpdateShareOutsideTerraform(t *testing.T) { }`, }) } + +func shareTemplate(provider_config string) string { + return fmt.Sprintf(` + resource "databricks_share" "myshare" { + name = "{var.STICKY_RANDOM}-share-config" + %s + object { + name = databricks_schema.schema1.id + data_object_type = "SCHEMA" + } + } +`, provider_config) +} + +func TestAccShare_ProviderConfig_Invalid(t *testing.T) { + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: preTestTemplateSchema + shareTemplate(` + provider_config { + workspace_id = "invalid" + } + `), + ExpectError: regexp.MustCompile( + `(?s)Attribute provider_config\[0\]\.workspace_id ` + + `workspace_id must be a valid.*integer, got: invalid`, + ), + PlanOnly: true, + }) +} + +func TestAccShare_ProviderConfig_Mismatched(t *testing.T) { + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: preTestTemplateSchema + shareTemplate(` + provider_config { + workspace_id = "123" + } + `), + ExpectError: regexp.MustCompile( + `(?s)failed to get workspace client.*workspace_id mismatch` + + `.*please check the workspace_id provided in ` + + `provider_config`, + ), + }) +} + +func TestAccShare_ProviderConfig_Multiple(t *testing.T) { + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: preTestTemplateSchema + shareTemplate(` + provider_config { + workspace_id = "123" + } + provider_config { + workspace_id = "456" + } + `), + ExpectError: regexp.MustCompile( + `Attribute provider_config list must contain at most 1 element`, + ), + PlanOnly: true, + }) +} + +func TestAccShare_ProviderConfig_Required(t *testing.T) { + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: preTestTemplateSchema + shareTemplate(` + provider_config { + } + `), + ExpectError: regexp.MustCompile(`(?s).*workspace_id.*is required`), + }) +} + +func TestAccShare_ProviderConfig_EmptyID(t *testing.T) { + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: preTestTemplateSchema + shareTemplate(` + provider_config { + 
workspace_id = "" + } + `), + ExpectError: regexp.MustCompile(`Attribute provider_config\[0\]\.workspace_id string length must be at least 1`), + PlanOnly: true, + }) +} + +func TestAccShare_ProviderConfig_NotProvided(t *testing.T) { + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: preTestTemplateSchema + shareTemplate(""), + }) +} + +func TestAccShare_ProviderConfig_Match(t *testing.T) { + acceptance.LoadUcwsEnv(t) + ctx := context.Background() + w := databricks.Must(databricks.NewWorkspaceClient()) + workspaceID, err := w.CurrentWorkspaceID(ctx) + require.NoError(t, err) + workspaceIDStr := strconv.FormatInt(workspaceID, 10) + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: preTestTemplateSchema + shareTemplate(""), + }, acceptance.Step{ + Template: preTestTemplateSchema + shareTemplate(fmt.Sprintf(` + provider_config { + workspace_id = "%s" + } + `, workspaceIDStr)), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction("databricks_share.myshare", plancheck.ResourceActionUpdate), + }, + }, + }) +} + +func TestAccShare_ProviderConfig_Recreate(t *testing.T) { + acceptance.LoadUcwsEnv(t) + ctx := context.Background() + w := databricks.Must(databricks.NewWorkspaceClient()) + workspaceID, err := w.CurrentWorkspaceID(ctx) + require.NoError(t, err) + workspaceIDStr := strconv.FormatInt(workspaceID, 10) + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: preTestTemplateSchema + shareTemplate(""), + }, acceptance.Step{ + Template: preTestTemplateSchema + shareTemplate(fmt.Sprintf(` + provider_config { + workspace_id = "%s" + } + `, workspaceIDStr)), + }, acceptance.Step{ + Template: preTestTemplateSchema + shareTemplate(` + provider_config { + workspace_id = "123" + } + `), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction("databricks_share.myshare", plancheck.ResourceActionDestroyBeforeCreate), + }, + }, + PlanOnly: true, + ExpectNonEmptyPlan: true, + }) +} + +func TestAccShare_ProviderConfig_Remove(t *testing.T) { + acceptance.LoadUcwsEnv(t) + ctx := context.Background() + w := databricks.Must(databricks.NewWorkspaceClient()) + workspaceID, err := w.CurrentWorkspaceID(ctx) + require.NoError(t, err) + workspaceIDStr := strconv.FormatInt(workspaceID, 10) + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: preTestTemplateSchema + shareTemplate(""), + }, acceptance.Step{ + Template: preTestTemplateSchema + shareTemplate(fmt.Sprintf(` + provider_config { + workspace_id = "%s" + } + `, workspaceIDStr)), + }, acceptance.Step{ + Template: preTestTemplateSchema + shareTemplate(""), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction("databricks_share.myshare", plancheck.ResourceActionUpdate), + }, + }, + }) +} diff --git a/internal/providers/pluginfw/tfschema/unified_provider.go b/internal/providers/pluginfw/tfschema/unified_provider.go index aac9ffe1a2..3aff327dd9 100644 --- a/internal/providers/pluginfw/tfschema/unified_provider.go +++ b/internal/providers/pluginfw/tfschema/unified_provider.go @@ -3,9 +3,11 @@ package tfschema import ( "context" "reflect" + "regexp" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/types" @@ -20,6 +22,10 @@ type Namespace struct { ProviderConfig types.Object `tfsdk:"provider_config"` } +type Namespace_SdkV2 struct { + ProviderConfig types.List `tfsdk:"provider_config"` +} + // ProviderConfig is used to store the provider configurations for unified terraform provider // across resources onboarded to plugin framework. type ProviderConfig struct { @@ -32,6 +38,8 @@ func (r ProviderConfig) ApplySchemaCustomizations(attrs map[string]AttributeBuil attrs["workspace_id"] = attrs["workspace_id"].(StringAttributeBuilder).AddPlanModifier( stringplanmodifier.RequiresReplaceIf(workspaceIDPlanModifier, "", "")) attrs["workspace_id"] = attrs["workspace_id"].(StringAttributeBuilder).AddValidator(stringvalidator.LengthAtLeast(1)) + attrs["workspace_id"] = attrs["workspace_id"].(StringAttributeBuilder).AddValidator( + stringvalidator.RegexMatches(regexp.MustCompile(`^\d+$`), "workspace_id must be a valid integer")) return attrs } @@ -81,6 +89,8 @@ type ProviderConfigData struct { func (r ProviderConfigData) ApplySchemaCustomizations(attrs map[string]AttributeBuilder) map[string]AttributeBuilder { attrs["workspace_id"] = attrs["workspace_id"].SetRequired() attrs["workspace_id"] = attrs["workspace_id"].(StringAttributeBuilder).AddValidator(stringvalidator.LengthAtLeast(1)) + attrs["workspace_id"] = attrs["workspace_id"].(StringAttributeBuilder).AddValidator( + stringvalidator.RegexMatches(regexp.MustCompile(`^\d+$`), "workspace_id must be a valid integer")) return attrs } @@ -107,3 +117,27 @@ func (r ProviderConfigData) Type(ctx context.Context) attr.Type { }, } } + +// GetWorkspaceID_SdkV2 extracts the workspace ID from a provider_config list (for SdkV2-compatible resources). +// It returns the workspace ID string and any diagnostics encountered during extraction. +// If the provider_config is not set, it returns an empty string with no diagnostics. +func GetWorkspaceID_SdkV2(ctx context.Context, providerConfig types.List) (string, diag.Diagnostics) { + var diags diag.Diagnostics + var workspaceID string + + if providerConfig.IsNull() || providerConfig.IsUnknown() { + return workspaceID, diags + } + + var namespaceList []ProviderConfig + diags.Append(providerConfig.ElementsAs(ctx, &namespaceList, true)...) 
+ if diags.HasError() { + return workspaceID, diags + } + + if len(namespaceList) > 0 { + workspaceID = namespaceList[0].WorkspaceID.ValueString() + } + + return workspaceID, diags +} diff --git a/internal/providers/pluginfw/tfschema/unified_provider_test.go b/internal/providers/pluginfw/tfschema/unified_provider_test.go index 3ab1b54da1..cc7bc619ac 100644 --- a/internal/providers/pluginfw/tfschema/unified_provider_test.go +++ b/internal/providers/pluginfw/tfschema/unified_provider_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/types" @@ -65,3 +66,70 @@ func TestWorkspaceIDPlanModifier(t *testing.T) { }) } } + +func TestGetWorkspaceID_SdkV2(t *testing.T) { + ctx := context.Background() + + tests := []struct { + name string + setupProviderConfig func() types.List + expectedWorkspaceID string + expectError bool + }{ + { + name: "valid workspace ID", + setupProviderConfig: func() types.List { + providerConfig := ProviderConfig{ + WorkspaceID: types.StringValue("123456789"), + } + return types.ListValueMust( + ProviderConfig{}.Type(ctx), + []attr.Value{providerConfig.ToObjectValue(ctx)}, + ) + }, + expectedWorkspaceID: "123456789", + expectError: false, + }, + { + name: "null provider_config", + setupProviderConfig: func() types.List { + return types.ListNull(ProviderConfig{}.Type(ctx)) + }, + expectedWorkspaceID: "", + expectError: false, + }, + { + name: "unknown provider_config", + setupProviderConfig: func() types.List { + return types.ListUnknown(ProviderConfig{}.Type(ctx)) + }, + expectedWorkspaceID: "", + expectError: false, + }, + { + name: "empty list", + setupProviderConfig: func() types.List { + return types.ListValueMust( + ProviderConfig{}.Type(ctx), + []attr.Value{}, + ) + }, + expectedWorkspaceID: "", + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + providerConfigList := tt.setupProviderConfig() + workspaceID, diags := GetWorkspaceID_SdkV2(ctx, providerConfigList) + + if tt.expectError { + assert.True(t, diags.HasError(), "Expected diagnostics error") + } else { + assert.False(t, diags.HasError(), "Expected no diagnostics error") + } + assert.Equal(t, tt.expectedWorkspaceID, workspaceID, "Workspace ID mismatch") + }) + } +}
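For reference, below is a minimal, hypothetical HCL sketch of the `provider_config` block documented in this change, modeled on the acceptance-test template above; the workspace ID, schema reference, and resource names are placeholders rather than values taken from this diff.

```hcl
resource "databricks_share" "example" {
  name = "example-share"

  # Pin management of this share to a specific workspace so that an
  # account-level provider knows which workspace client to use.
  provider_config {
    workspace_id = "1234567890123456" # placeholder workspace ID
  }

  object {
    name             = databricks_schema.example.id
    data_object_type = "SCHEMA"
  }
}
```

The same single-element `provider_config` block applies to `databricks_library` and `databricks_quality_monitor`, as described in the updated resource documentation above.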