From 2dbe106c90c108ee5ea9302920cb9a7c0ac13dfd Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 29 Oct 2025 11:13:10 +0100 Subject: [PATCH 1/5] Added `databricks_permission` resource for managing permissions for individual principals. This resource provides fine-grained control over permissions by managing a single principal's access to a single object, unlike `databricks_permissions` which manages all principals' access to an object at once. This is particularly useful for: - Managing permissions for different teams independently - Token and password authorization permissions that previously required all principals in one resource - Avoiding conflicts when multiple configurations manage different principals on the same object --- NEXT_CHANGELOG.md | 3 +- docs/resources/permission.md | 282 ++++++++++ .../pluginfw/pluginfw_rollout_utils.go | 2 + .../products/permissions/object_id_helpers.go | 91 +++ .../permissions/object_id_helpers_test.go | 180 ++++++ .../products/permissions/permission_entity.go | 59 ++ .../permissions/permission_level_validator.go | 84 +++ .../permission_level_validator_test.go | 24 + .../permissions/resource_permission.go | 523 ++++++++++++++++++ .../permissions/resource_permission_test.go | 140 +++++ permissions/permission_definitions.go | 94 +++- permissions/resource_permission_acc_test.go | 518 +++++++++++++++++ permissions/resource_permissions.go | 20 +- permissions/resource_permissions_test.go | 2 +- 14 files changed, 1988 insertions(+), 34 deletions(-) create mode 100644 docs/resources/permission.md create mode 100644 internal/providers/pluginfw/products/permissions/object_id_helpers.go create mode 100644 internal/providers/pluginfw/products/permissions/object_id_helpers_test.go create mode 100644 internal/providers/pluginfw/products/permissions/permission_entity.go create mode 100644 internal/providers/pluginfw/products/permissions/permission_level_validator.go create mode 100644 
internal/providers/pluginfw/products/permissions/permission_level_validator_test.go create mode 100644 internal/providers/pluginfw/products/permissions/resource_permission.go create mode 100644 internal/providers/pluginfw/products/permissions/resource_permission_test.go create mode 100644 permissions/resource_permission_acc_test.go diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 765eef12b6..0edf166409 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -6,7 +6,8 @@ ### New Features and Improvements -* Add `provider_config` support for manual plugin framework resources and data sources([#5127](https://github.com/databricks/terraform-provider-databricks/pull/5127)) +* Add `provider_config` support for manual plugin framework resources and data sources ([#5127](https://github.com/databricks/terraform-provider-databricks/pull/5127)) +* Added `databricks_permission` resource for managing permissions on Databricks objects for individual principals ([#5161](https://github.com/databricks/terraform-provider-databricks/pull/5161)). ### Bug Fixes diff --git a/docs/resources/permission.md b/docs/resources/permission.md new file mode 100644 index 0000000000..efb30d96ed --- /dev/null +++ b/docs/resources/permission.md @@ -0,0 +1,282 @@ +--- +subcategory: "Security" +--- + +# databricks_permission Resource + +This resource allows you to manage permissions for a single principal on a Databricks workspace object. Unlike `databricks_permissions`, which manages all principals' permissions for an object at once, this resource is authoritative for a specific object-principal pair only. + +-> This resource can only be used with a workspace-level provider! + +~> This resource is _authoritative_ for the specified object-principal pair. Configuring this resource will manage the permission for the specified principal only, without affecting permissions for other principals. 
+ +-> Use `databricks_permissions` when you need to manage all permissions for an object in a single resource. Use `databricks_permission` (singular) when you want to manage permissions for individual principals independently. + +## Example Usage + +### Cluster Permissions + +```hcl +resource "databricks_cluster" "shared" { + cluster_name = "Shared Analytics" + spark_version = data.databricks_spark_version.latest.id + node_type_id = data.databricks_node_type.smallest.id + autotermination_minutes = 20 + + autoscale { + min_workers = 1 + max_workers = 10 + } +} + +resource "databricks_group" "data_engineers" { + display_name = "Data Engineers" +} + +# Grant CAN_RESTART permission to a group +resource "databricks_permission" "cluster_de" { + cluster_id = databricks_cluster.shared.id + group_name = databricks_group.data_engineers.display_name + permission_level = "CAN_RESTART" +} + +# Grant CAN_ATTACH_TO permission to a user +resource "databricks_permission" "cluster_analyst" { + cluster_id = databricks_cluster.shared.id + user_name = "analyst@company.com" + permission_level = "CAN_ATTACH_TO" +} +``` + +### Job Permissions + +```hcl +resource "databricks_job" "etl" { + name = "ETL Pipeline" + + task { + task_key = "process_data" + + notebook_task { + notebook_path = "/Shared/ETL" + } + + new_cluster { + num_workers = 2 + spark_version = data.databricks_spark_version.latest.id + node_type_id = data.databricks_node_type.smallest.id + } + } +} + +# Grant CAN_MANAGE to a service principal +resource "databricks_permission" "job_sp" { + job_id = databricks_job.etl.id + service_principal_name = databricks_service_principal.automation.application_id + permission_level = "CAN_MANAGE" +} + +# Grant CAN_VIEW to a group +resource "databricks_permission" "job_viewers" { + job_id = databricks_job.etl.id + group_name = "Data Viewers" + permission_level = "CAN_VIEW" +} +``` + +### Notebook Permissions + +```hcl +resource "databricks_notebook" "analysis" { + path = "/Shared/Analysis" + 
language = "PYTHON" + content_base64 = base64encode(<<-EOT + # Analysis Notebook + print("Hello, World!") + EOT + ) +} + +# Grant CAN_RUN to a user +resource "databricks_permission" "notebook_user" { + notebook_path = databricks_notebook.analysis.path + user_name = "data.scientist@company.com" + permission_level = "CAN_RUN" +} + +# Grant CAN_EDIT to a group +resource "databricks_permission" "notebook_editors" { + notebook_path = databricks_notebook.analysis.path + group_name = "Notebook Editors" + permission_level = "CAN_EDIT" +} +``` + +### Token Permissions + +This resource solves the limitation where all token permissions must be defined in a single `databricks_permissions` resource: + +```hcl +# Multiple resources can now manage different principals independently +resource "databricks_permission" "tokens_team_a" { + authorization = "tokens" + group_name = "Team A" + permission_level = "CAN_USE" +} + +resource "databricks_permission" "tokens_team_b" { + authorization = "tokens" + group_name = "Team B" + permission_level = "CAN_USE" +} + +resource "databricks_permission" "tokens_service_account" { + authorization = "tokens" + service_principal_name = databricks_service_principal.ci_cd.application_id + permission_level = "CAN_USE" +} +``` + +### SQL Endpoint Permissions + +```hcl +resource "databricks_sql_endpoint" "analytics" { + name = "Analytics Warehouse" + cluster_size = "Small" + max_num_clusters = 1 +} + +resource "databricks_permission" "warehouse_users" { + sql_endpoint_id = databricks_sql_endpoint.analytics.id + group_name = "SQL Users" + permission_level = "CAN_USE" +} +``` + +## Argument Reference + +The following arguments are required: + +* `permission_level` - (Required) The permission level to grant. The available permission levels depend on the object type. Common values include `CAN_MANAGE`, `CAN_USE`, `CAN_VIEW`, `CAN_RUN`, `CAN_EDIT`, `CAN_READ`, `CAN_RESTART`, `CAN_ATTACH_TO`. 
+ +Exactly one of the following principal identifiers must be specified: + +* `user_name` - (Optional) User email address to grant permissions to. Conflicts with `group_name` and `service_principal_name`. +* `group_name` - (Optional) Group name to grant permissions to. Conflicts with `user_name` and `service_principal_name`. +* `service_principal_name` - (Optional) Application ID of the service principal. Conflicts with `user_name` and `group_name`. + +Exactly one of the following object identifiers must be specified: + +* `cluster_id` - (Optional) ID of the [databricks_cluster](cluster.md). +* `cluster_policy_id` - (Optional) ID of the [databricks_cluster_policy](cluster_policy.md). +* `instance_pool_id` - (Optional) ID of the [databricks_instance_pool](instance_pool.md). +* `job_id` - (Optional) ID of the [databricks_job](job.md). +* `pipeline_id` - (Optional) ID of the [databricks_pipeline](pipeline.md). +* `notebook_id` - (Optional) ID of the [databricks_notebook](notebook.md). Can be used when the notebook is referenced by ID. +* `notebook_path` - (Optional) Path to the [databricks_notebook](notebook.md). +* `directory_id` - (Optional) ID of the [databricks_directory](directory.md). +* `directory_path` - (Optional) Path to the [databricks_directory](directory.md). +* `workspace_file_id` - (Optional) ID of the [databricks_workspace_file](workspace_file.md). +* `workspace_file_path` - (Optional) Path to the [databricks_workspace_file](workspace_file.md). +* `registered_model_id` - (Optional) ID of the [databricks_mlflow_model](mlflow_model.md). +* `experiment_id` - (Optional) ID of the [databricks_mlflow_experiment](mlflow_experiment.md). +* `sql_dashboard_id` - (Optional) ID of the legacy [databricks_sql_dashboard](sql_dashboard.md). +* `sql_endpoint_id` - (Optional) ID of the [databricks_sql_endpoint](sql_endpoint.md). +* `sql_query_id` - (Optional) ID of the [databricks_query](query.md). +* `sql_alert_id` - (Optional) ID of the [databricks_alert](alert.md). 
+* `dashboard_id` - (Optional) ID of the [databricks_dashboard](dashboard.md) (Lakeview).
+* `repo_id` - (Optional) ID of the [databricks_repo](repo.md).
+* `repo_path` - (Optional) Path to the [databricks_repo](repo.md).
+* `authorization` - (Optional) Type of authorization. Currently supports `tokens` and `passwords`.
+* `serving_endpoint_id` - (Optional) ID of the [databricks_model_serving](model_serving.md) endpoint.
+* `vector_search_endpoint_id` - (Optional) ID of the [databricks_vector_search_endpoint](vector_search_endpoint.md).
+
+## Attribute Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - The ID of the permission in the format `/<object_type>/<object_id>/<principal>`.
+* `object_type` - The type of object (e.g., `clusters`, `jobs`, `notebooks`).
+
+## Import
+
+Permissions can be imported using the format `/<object_type>/<object_id>/<principal>`. For example:
+
+```bash
+terraform import databricks_permission.cluster_analyst /clusters/0123-456789-abc12345/analyst@company.com
+```
+
+## Comparison with databricks_permissions
+
+### When to use `databricks_permission` (singular)
+
+* You want to manage permissions for individual principals independently
+* Different teams manage permissions for different principals on the same object
+* You need to avoid the "all-or-nothing" approach of `databricks_permissions`
+* You want to add/remove principals without affecting others
+* Special cases like token permissions where multiple independent configurations are needed
+
+### When to use `databricks_permissions` (plural)
+
+* You want to manage all permissions for an object in one place
+* You have a small, stable set of principals for an object
+* You want to ensure no unexpected permissions exist on the object
+* You prefer a single source of truth for all permissions on an object
+
+### Example Comparison
+
+**Using `databricks_permissions` (manages ALL principals)**:
+
+```hcl
+resource "databricks_permissions" "cluster_all" {
+  cluster_id = databricks_cluster.shared.id
+
access_control { + group_name = "Data Engineers" + permission_level = "CAN_RESTART" + } + + access_control { + user_name = "analyst@company.com" + permission_level = "CAN_ATTACH_TO" + } + + # Adding a third principal requires modifying this resource +} +``` + +**Using `databricks_permission` (manages ONE principal per resource)**: + +```hcl +resource "databricks_permission" "cluster_de" { + cluster_id = databricks_cluster.shared.id + group_name = "Data Engineers" + permission_level = "CAN_RESTART" +} + +resource "databricks_permission" "cluster_analyst" { + cluster_id = databricks_cluster.shared.id + user_name = "analyst@company.com" + permission_level = "CAN_ATTACH_TO" +} + +# Adding a third principal is a separate resource +# No need to modify existing resources +resource "databricks_permission" "cluster_viewer" { + cluster_id = databricks_cluster.shared.id + group_name = "Viewers" + permission_level = "CAN_ATTACH_TO" +} +``` + +## Related Resources + +The following resources are used in the same context: + +* [databricks_permissions](permissions.md) - Manage all permissions for an object at once +* [databricks_cluster](cluster.md) - Create Databricks clusters +* [databricks_job](job.md) - Create Databricks jobs +* [databricks_notebook](notebook.md) - Manage Databricks notebooks +* [databricks_group](group.md) - Manage Databricks groups +* [databricks_user](user.md) - Manage Databricks users +* [databricks_service_principal](service_principal.md) - Manage Databricks service principals diff --git a/internal/providers/pluginfw/pluginfw_rollout_utils.go b/internal/providers/pluginfw/pluginfw_rollout_utils.go index 575f2507c4..9c3b242e3f 100644 --- a/internal/providers/pluginfw/pluginfw_rollout_utils.go +++ b/internal/providers/pluginfw/pluginfw_rollout_utils.go @@ -19,6 +19,7 @@ import ( "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/dashboards" 
"github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/library" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/notificationdestinations" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/permissions" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/qualitymonitor" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/registered_model" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/products/serving" @@ -49,6 +50,7 @@ var migratedDataSources = []func() datasource.DataSource{ var pluginFwOnlyResources = append( []func() resource.Resource{ app.ResourceApp, + permissions.ResourcePermission, }, autoGeneratedResources..., ) diff --git a/internal/providers/pluginfw/products/permissions/object_id_helpers.go b/internal/providers/pluginfw/products/permissions/object_id_helpers.go new file mode 100644 index 0000000000..06fefa9f7b --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/object_id_helpers.go @@ -0,0 +1,91 @@ +package permissions + +import ( + "context" + "maps" + "slices" + + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// ObjectIdentifiers is a struct containing all object identifier fields +// This is used by both PermissionResourceModel and the validator +type ObjectIdentifiers struct { + ClusterId types.String `tfsdk:"cluster_id"` + ClusterPolicyId types.String `tfsdk:"cluster_policy_id"` + InstancePoolId types.String `tfsdk:"instance_pool_id"` + JobId types.String `tfsdk:"job_id"` + PipelineId types.String `tfsdk:"pipeline_id"` + NotebookId types.String `tfsdk:"notebook_id"` + NotebookPath types.String `tfsdk:"notebook_path"` + DirectoryId types.String `tfsdk:"directory_id"` + DirectoryPath types.String `tfsdk:"directory_path"` + WorkspaceFileId 
types.String `tfsdk:"workspace_file_id"` + WorkspaceFilePath types.String `tfsdk:"workspace_file_path"` + RegisteredModelId types.String `tfsdk:"registered_model_id"` + ExperimentId types.String `tfsdk:"experiment_id"` + SqlDashboardId types.String `tfsdk:"sql_dashboard_id"` + SqlEndpointId types.String `tfsdk:"sql_endpoint_id"` + SqlQueryId types.String `tfsdk:"sql_query_id"` + SqlAlertId types.String `tfsdk:"sql_alert_id"` + DashboardId types.String `tfsdk:"dashboard_id"` + RepoId types.String `tfsdk:"repo_id"` + RepoPath types.String `tfsdk:"repo_path"` + Authorization types.String `tfsdk:"authorization"` + ServingEndpointId types.String `tfsdk:"serving_endpoint_id"` + VectorSearchEndpointId types.String `tfsdk:"vector_search_endpoint_id"` + AppName types.String `tfsdk:"app_name"` + DatabaseInstanceName types.String `tfsdk:"database_instance_name"` + AlertV2Id types.String `tfsdk:"alert_v2_id"` +} + +// ToFieldValuesMap converts the ObjectIdentifiers struct to a map of field names to values +func (o *ObjectIdentifiers) ToFieldValuesMap() map[string]string { + return map[string]string{ + "cluster_id": o.ClusterId.ValueString(), + "cluster_policy_id": o.ClusterPolicyId.ValueString(), + "instance_pool_id": o.InstancePoolId.ValueString(), + "job_id": o.JobId.ValueString(), + "pipeline_id": o.PipelineId.ValueString(), + "notebook_id": o.NotebookId.ValueString(), + "notebook_path": o.NotebookPath.ValueString(), + "directory_id": o.DirectoryId.ValueString(), + "directory_path": o.DirectoryPath.ValueString(), + "workspace_file_id": o.WorkspaceFileId.ValueString(), + "workspace_file_path": o.WorkspaceFilePath.ValueString(), + "registered_model_id": o.RegisteredModelId.ValueString(), + "experiment_id": o.ExperimentId.ValueString(), + "sql_dashboard_id": o.SqlDashboardId.ValueString(), + "sql_endpoint_id": o.SqlEndpointId.ValueString(), + "sql_query_id": o.SqlQueryId.ValueString(), + "sql_alert_id": o.SqlAlertId.ValueString(), + "dashboard_id": o.DashboardId.ValueString(), 
+ "repo_id": o.RepoId.ValueString(), + "repo_path": o.RepoPath.ValueString(), + "authorization": o.Authorization.ValueString(), + "serving_endpoint_id": o.ServingEndpointId.ValueString(), + "vector_search_endpoint_id": o.VectorSearchEndpointId.ValueString(), + "app_name": o.AppName.ValueString(), + "database_instance_name": o.DatabaseInstanceName.ValueString(), + "alert_v2_id": o.AlertV2Id.ValueString(), + } +} + +// GetObjectIdentifierFields returns all possible object identifier field names +// This is derived from the keys of ToFieldValuesMap() to maintain single source of truth +func GetObjectIdentifierFields() []string { + var empty ObjectIdentifiers + return slices.Collect(maps.Keys(empty.ToFieldValuesMap())) +} + +// ExtractObjectIdentifiersFromConfig reads object identifiers from a tfsdk.Config +// Returns nil if there are errors reading the config +func ExtractObjectIdentifiersFromConfig(ctx context.Context, config tfsdk.Config) *ObjectIdentifiers { + var objectIds ObjectIdentifiers + diags := config.Get(ctx, &objectIds) + if diags.HasError() { + return nil + } + return &objectIds +} diff --git a/internal/providers/pluginfw/products/permissions/object_id_helpers_test.go b/internal/providers/pluginfw/products/permissions/object_id_helpers_test.go new file mode 100644 index 0000000000..b41b2392ac --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/object_id_helpers_test.go @@ -0,0 +1,180 @@ +package permissions + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" +) + +func TestObjectIdentifiers_ToFieldValuesMap(t *testing.T) { + tests := []struct { + name string + objIds ObjectIdentifiers + expected map[string]string + }{ + { + name: "cluster_id set", + objIds: ObjectIdentifiers{ + ClusterId: types.StringValue("cluster-123"), + }, + expected: map[string]string{ + "cluster_id": "cluster-123", + // All other fields should be empty strings + "cluster_policy_id": "", + 
"instance_pool_id": "", + "job_id": "", + "pipeline_id": "", + "notebook_id": "", + "notebook_path": "", + "directory_id": "", + "directory_path": "", + "workspace_file_id": "", + "workspace_file_path": "", + "registered_model_id": "", + "experiment_id": "", + "sql_dashboard_id": "", + "sql_endpoint_id": "", + "sql_query_id": "", + "sql_alert_id": "", + "dashboard_id": "", + "repo_id": "", + "repo_path": "", + "authorization": "", + "serving_endpoint_id": "", + "vector_search_endpoint_id": "", + "app_name": "", + "database_instance_name": "", + "alert_v2_id": "", + }, + }, + { + name: "multiple fields set", + objIds: ObjectIdentifiers{ + JobId: types.StringValue("job-456"), + Authorization: types.StringValue("tokens"), + NotebookPath: types.StringValue("/Users/test/notebook"), + }, + expected: map[string]string{ + "cluster_id": "", + "cluster_policy_id": "", + "instance_pool_id": "", + "job_id": "job-456", + "pipeline_id": "", + "notebook_id": "", + "notebook_path": "/Users/test/notebook", + "directory_id": "", + "directory_path": "", + "workspace_file_id": "", + "workspace_file_path": "", + "registered_model_id": "", + "experiment_id": "", + "sql_dashboard_id": "", + "sql_endpoint_id": "", + "sql_query_id": "", + "sql_alert_id": "", + "dashboard_id": "", + "repo_id": "", + "repo_path": "", + "authorization": "tokens", + "serving_endpoint_id": "", + "vector_search_endpoint_id": "", + "app_name": "", + "database_instance_name": "", + "alert_v2_id": "", + }, + }, + { + name: "all fields empty", + objIds: ObjectIdentifiers{}, + expected: map[string]string{ + "cluster_id": "", + "cluster_policy_id": "", + "instance_pool_id": "", + "job_id": "", + "pipeline_id": "", + "notebook_id": "", + "notebook_path": "", + "directory_id": "", + "directory_path": "", + "workspace_file_id": "", + "workspace_file_path": "", + "registered_model_id": "", + "experiment_id": "", + "sql_dashboard_id": "", + "sql_endpoint_id": "", + "sql_query_id": "", + "sql_alert_id": "", + 
"dashboard_id": "", + "repo_id": "", + "repo_path": "", + "authorization": "", + "serving_endpoint_id": "", + "vector_search_endpoint_id": "", + "app_name": "", + "database_instance_name": "", + "alert_v2_id": "", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.objIds.ToFieldValuesMap() + + // Check that all expected keys are present + assert.Len(t, result, len(tt.expected), "Map should have correct number of entries") + + // Check specific values we care about + for key, expectedValue := range tt.expected { + assert.Contains(t, result, key, "Map should contain key: %s", key) + assert.Equal(t, expectedValue, result[key], "Value for key %s should match", key) + } + }) + } +} + +func TestGetObjectIdentifierFields(t *testing.T) { + // Verify that GetObjectIdentifierFields returns all expected fields + expectedFields := []string{ + "cluster_id", + "cluster_policy_id", + "instance_pool_id", + "job_id", + "pipeline_id", + "notebook_id", + "notebook_path", + "directory_id", + "directory_path", + "workspace_file_id", + "workspace_file_path", + "registered_model_id", + "experiment_id", + "sql_dashboard_id", + "sql_endpoint_id", + "sql_query_id", + "sql_alert_id", + "dashboard_id", + "repo_id", + "repo_path", + "authorization", + "serving_endpoint_id", + "vector_search_endpoint_id", + "app_name", + "database_instance_name", + "alert_v2_id", + } + + fields := GetObjectIdentifierFields() + assert.Len(t, fields, len(expectedFields), "Should have all object identifier fields") + + // Check that all expected fields are present + fieldMap := make(map[string]bool) + for _, field := range fields { + fieldMap[field] = true + } + + for _, expected := range expectedFields { + assert.True(t, fieldMap[expected], "GetObjectIdentifierFields() should contain: %s", expected) + } +} diff --git a/internal/providers/pluginfw/products/permissions/permission_entity.go b/internal/providers/pluginfw/products/permissions/permission_entity.go new file 
mode 100644 index 0000000000..4fa0b2efde --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/permission_entity.go @@ -0,0 +1,59 @@ +package permissions + +import "github.com/databricks/databricks-sdk-go/service/iam" + +// PermissionEntity is the entity used for singular databricks_permission resource +// It represents permissions for a single principal on a single object +// Note: Currently not used, the resource uses PermissionResourceModel directly +type PermissionEntity struct { + // Object type - computed + ObjectType string `json:"object_type,omitempty" tf:"computed"` + + // Principal identifiers - exactly one required + UserName string `json:"user_name,omitempty"` + GroupName string `json:"group_name,omitempty"` + ServicePrincipalName string `json:"service_principal_name,omitempty"` + + // Permission level for this principal + PermissionLevel iam.PermissionLevel `json:"permission_level"` +} + +// GetPrincipalName returns the principal identifier from the entity +func (p PermissionEntity) GetPrincipalName() string { + if p.UserName != "" { + return p.UserName + } + if p.GroupName != "" { + return p.GroupName + } + if p.ServicePrincipalName != "" { + return p.ServicePrincipalName + } + return "" +} + +// ToAccessControlRequest converts PermissionEntity to AccessControlRequest for API calls +func (p PermissionEntity) ToAccessControlRequest() iam.AccessControlRequest { + return iam.AccessControlRequest{ + UserName: p.UserName, + GroupName: p.GroupName, + ServicePrincipalName: p.ServicePrincipalName, + PermissionLevel: p.PermissionLevel, + } +} + +// FromAccessControlResponse creates a PermissionEntity from an AccessControlResponse +func FromAccessControlResponse(acr iam.AccessControlResponse) PermissionEntity { + // Get the highest permission level from AllPermissions + var permissionLevel iam.PermissionLevel + if len(acr.AllPermissions) > 0 { + permissionLevel = acr.AllPermissions[0].PermissionLevel + } + + return PermissionEntity{ + UserName: 
acr.UserName, + GroupName: acr.GroupName, + ServicePrincipalName: acr.ServicePrincipalName, + PermissionLevel: permissionLevel, + } +} diff --git a/internal/providers/pluginfw/products/permissions/permission_level_validator.go b/internal/providers/pluginfw/products/permissions/permission_level_validator.go new file mode 100644 index 0000000000..b997734615 --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/permission_level_validator.go @@ -0,0 +1,84 @@ +package permissions + +import ( + "context" + "fmt" + + "github.com/databricks/terraform-provider-databricks/permissions" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// permissionLevelValidator validates that the permission_level is valid for the configured object type +type permissionLevelValidator struct{} + +func (v permissionLevelValidator) Description(ctx context.Context) string { + return "validates that the permission level is valid for the configured object type" +} + +func (v permissionLevelValidator) MarkdownDescription(ctx context.Context) string { + return "validates that the permission level is valid for the configured object type" +} + +func (v permissionLevelValidator) ValidateString(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { + if req.ConfigValue.IsNull() || req.ConfigValue.IsUnknown() { + return + } + + permissionLevel := req.ConfigValue.ValueString() + + // Dynamically iterate through all permission definitions to find which object ID is set + allPermissions := permissions.AllResourcePermissions() + var mapping permissions.WorkspaceObjectPermissions + var found bool + + for _, m := range allPermissions { + var attrValue types.String + diags := req.Config.GetAttribute(ctx, path.Root(m.GetField()), &attrValue) + if diags.HasError() { + continue // Attribute doesn't exist or has errors, try next + } + 
+ if !attrValue.IsNull() && !attrValue.IsUnknown() && attrValue.ValueString() != "" { + mapping = m + found = true + break + } + } + + if !found { + // If we can't determine the object type, let the ConflictsWith validators handle it + return + } + + // Get allowed permission levels for this object type + allowedLevels := mapping.GetAllowedPermissionLevels(true) // true = include non-management permissions + + // Check if the configured permission level is allowed + isValid := false + for _, allowedLevel := range allowedLevels { + if allowedLevel == permissionLevel { + isValid = true + break + } + } + + if !isValid { + resp.Diagnostics.AddAttributeError( + req.Path, + "Invalid Permission Level", + fmt.Sprintf( + "Permission level %q is not valid for object type %q. Allowed levels: %v", + permissionLevel, + mapping.GetObjectType(), + allowedLevels, + ), + ) + } +} + +// ValidatePermissionLevel returns a validator that checks if the permission level is valid for the object type +func ValidatePermissionLevel() validator.String { + return permissionLevelValidator{} +} diff --git a/internal/providers/pluginfw/products/permissions/permission_level_validator_test.go b/internal/providers/pluginfw/products/permissions/permission_level_validator_test.go new file mode 100644 index 0000000000..ce1d69f6c8 --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/permission_level_validator_test.go @@ -0,0 +1,24 @@ +package permissions + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPermissionLevelValidator_Description(t *testing.T) { + validator := ValidatePermissionLevel() + + desc := validator.Description(context.Background()) + assert.NotEmpty(t, desc) + assert.Contains(t, desc, "permission level") + + mdDesc := validator.MarkdownDescription(context.Background()) + assert.NotEmpty(t, mdDesc) + assert.Contains(t, mdDesc, "permission level") +} + +// Note: Full validation testing with config is done in acceptance tests +// 
The validator requires access to the full config to determine object type, +// which is complex to mock in unit tests but straightforward in integration tests diff --git a/internal/providers/pluginfw/products/permissions/resource_permission.go b/internal/providers/pluginfw/products/permissions/resource_permission.go new file mode 100644 index 0000000000..1a38973e3a --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/resource_permission.go @@ -0,0 +1,523 @@ +package permissions + +import ( + "context" + "fmt" + "strings" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + "github.com/databricks/terraform-provider-databricks/permissions" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +const resourceName = "permission" + +var _ resource.ResourceWithConfigure = &PermissionResource{} + +func ResourcePermission() resource.Resource { + return &PermissionResource{} +} + +type PermissionResource struct { + Client *common.DatabricksClient +} + +// PermissionResourceModel represents the Terraform resource model +// Note: Object identifiers are NOT defined in the struct - they are read/written dynamically +// using GetAttribute()/SetAttribute(). 
This eliminates the need for hardcoded fields +// and makes the code truly generic - new permission types require zero changes here. +type PermissionResourceModel struct { + ID types.String `tfsdk:"id"` + ObjectType types.String `tfsdk:"object_type"` + + // Principal identifiers - exactly one required + UserName types.String `tfsdk:"user_name"` + GroupName types.String `tfsdk:"group_name"` + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + + // Permission level + PermissionLevel types.String `tfsdk:"permission_level"` + + // Note: Object identifiers (cluster_id, job_id, etc.) are NOT defined here. + // They are accessed dynamically using GetAttribute()/SetAttribute() based on + // the definitions in permissions/permission_definitions.go +} + +func (r *PermissionResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = pluginfwcommon.GetDatabricksProductionName(resourceName) +} + +func (r *PermissionResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + // Collect all object identifier field names for ConflictsWith validators + allPermissions := permissions.AllResourcePermissions() + objectFieldPaths := make([]path.Expression, 0, len(allPermissions)) + for _, mapping := range allPermissions { + objectFieldPaths = append(objectFieldPaths, path.MatchRoot(mapping.GetField())) + } + + attrs := map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "object_type": schema.StringAttribute{ + Computed: true, + }, + // Principal identifiers - exactly one required, mutually exclusive + "user_name": schema.StringAttribute{ + Optional: true, + Description: "User name of the principal. 
Conflicts with group_name and service_principal_name.", + Validators: []validator.String{ + stringvalidator.ConflictsWith( + path.MatchRoot("group_name"), + path.MatchRoot("service_principal_name"), + ), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "group_name": schema.StringAttribute{ + Optional: true, + Description: "Group name of the principal. Conflicts with user_name and service_principal_name.", + Validators: []validator.String{ + stringvalidator.ConflictsWith( + path.MatchRoot("user_name"), + path.MatchRoot("service_principal_name"), + ), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "service_principal_name": schema.StringAttribute{ + Optional: true, + Description: "Service principal name. Conflicts with user_name and group_name.", + Validators: []validator.String{ + stringvalidator.ConflictsWith( + path.MatchRoot("user_name"), + path.MatchRoot("group_name"), + ), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + // Permission level + "permission_level": schema.StringAttribute{ + Required: true, + Description: "Permission level for the principal on the object (e.g., CAN_MANAGE, CAN_USE, CAN_VIEW).", + Validators: []validator.String{ + ValidatePermissionLevel(), + }, + }, + } + + // Dynamically add object identifier attributes from permission definitions + // Each object identifier is mutually exclusive with all others + for i, mapping := range allPermissions { + fieldName := mapping.GetField() + + // Build ConflictsWith list - all other object fields except this one + conflictPaths := make([]path.Expression, 0, len(objectFieldPaths)-1) + for j, p := range objectFieldPaths { + if i != j { + conflictPaths = append(conflictPaths, p) + } + } + + attrs[fieldName] = schema.StringAttribute{ + Optional: true, + Description: fmt.Sprintf("ID or path for %s object type. 
Conflicts with all other object identifier attributes.", mapping.GetObjectType()), + Validators: []validator.String{ + stringvalidator.ConflictsWith(conflictPaths...), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + } + } + + resp.Schema = schema.Schema{ + Description: "Manages permissions for a single principal on a Databricks object. " + + "This resource is authoritative for the specified object-principal pair only. " + + "Use `databricks_permissions` for managing all principals on an object at once.", + Attributes: attrs, + } +} + +func (r *PermissionResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*common.DatabricksClient) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected *common.DatabricksClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + r.Client = client +} + +func (r *PermissionResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Read principal and permission_level using GetAttribute + var userName, groupName, servicePrincipalName, permissionLevel types.String + + resp.Diagnostics.Append(req.Plan.GetAttribute(ctx, path.Root("user_name"), &userName)...) + resp.Diagnostics.Append(req.Plan.GetAttribute(ctx, path.Root("group_name"), &groupName)...) + resp.Diagnostics.Append(req.Plan.GetAttribute(ctx, path.Root("service_principal_name"), &servicePrincipalName)...) + resp.Diagnostics.Append(req.Plan.GetAttribute(ctx, path.Root("permission_level"), &permissionLevel)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + w, err := r.Client.WorkspaceClient() + if err != nil { + resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) + return + } + + // Get the object mapping and ID (reads object identifiers dynamically from plan) + mapping, objectID, objectFieldName, objectFieldValue, err := r.getObjectMappingAndID(ctx, w, req.Plan) + if err != nil { + resp.Diagnostics.AddError("Failed to get object mapping", err.Error()) + return + } + + // Determine principal identifier + var principal string + if !userName.IsNull() && !userName.IsUnknown() && userName.ValueString() != "" { + principal = userName.ValueString() + } else if !groupName.IsNull() && !groupName.IsUnknown() && groupName.ValueString() != "" { + principal = groupName.ValueString() + } else if !servicePrincipalName.IsNull() && !servicePrincipalName.IsUnknown() && servicePrincipalName.ValueString() != "" { + principal = servicePrincipalName.ValueString() + } else { + resp.Diagnostics.AddError("Invalid principal configuration", "exactly one of 'user_name', 'group_name', or 'service_principal_name' must be set") + return + } + + // Create the permission update request + permLevel := iam.PermissionLevel(permissionLevel.ValueString()) + + // Use Update API (PATCH) to add permissions for this principal only + idParts := strings.Split(objectID, "/") + permID := idParts[len(idParts)-1] + + _, err = w.Permissions.Update(ctx, iam.UpdateObjectPermissions{ + RequestObjectId: permID, + RequestObjectType: mapping.GetRequestObjectType(), + AccessControlList: []iam.AccessControlRequest{ + { + UserName: userName.ValueString(), + GroupName: groupName.ValueString(), + ServicePrincipalName: servicePrincipalName.ValueString(), + PermissionLevel: permLevel, + }, + }, + }) + if err != nil { + resp.Diagnostics.AddError("Failed to create permission", err.Error()) + return + } + + // Set the ID, object_type, and all other fields in state + resourceID := fmt.Sprintf("%s/%s", 
objectID, principal) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), types.StringValue(resourceID))...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("object_type"), types.StringValue(mapping.GetObjectType()))...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(objectFieldName), objectFieldValue)...) // Set the object identifier field + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("user_name"), userName)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("group_name"), groupName)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("service_principal_name"), servicePrincipalName)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("permission_level"), permissionLevel)...) +} + +func (r *PermissionResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Read ID from state using GetAttribute + var id types.String + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("id"), &id)...) 
+ if resp.Diagnostics.HasError() { + return + } + + w, err := r.Client.WorkspaceClient() + if err != nil { + resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) + return + } + + // Parse the ID to get object ID and principal + objectID, principal, err := r.parseID(id.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Failed to parse resource ID", err.Error()) + return + } + + // Get the mapping from the ID + mapping, err := permissions.GetResourcePermissionsFromId(objectID) + if err != nil { + resp.Diagnostics.AddError("Failed to get resource permissions mapping", err.Error()) + return + } + + // Read current permissions + idParts := strings.Split(objectID, "/") + permID := idParts[len(idParts)-1] + + perms, err := w.Permissions.Get(ctx, iam.GetPermissionRequest{ + RequestObjectId: permID, + RequestObjectType: mapping.GetRequestObjectType(), + }) + if err != nil { + resp.Diagnostics.AddError("Failed to read permissions", err.Error()) + return + } + + // Filter for the specific principal + found := false + var currentPermissionLevel types.String + for _, acl := range perms.AccessControlList { + if r.matchesPrincipal(acl, principal) { + // Update the state with the current permission level + if len(acl.AllPermissions) > 0 { + currentPermissionLevel = types.StringValue(string(acl.AllPermissions[0].PermissionLevel)) + found = true + break + } + } + } + + if !found { + // Permission no longer exists, remove from state + resp.State.RemoveResource(ctx) + return + } + + // Read the object identifier field from current state to preserve it + // (It should already be in state, but we need to make sure it stays there) + var objectFieldValue types.String + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root(mapping.GetField()), &objectFieldValue)...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Update state using SetAttribute + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(mapping.GetField()), objectFieldValue)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("permission_level"), currentPermissionLevel)...) +} + +func (r *PermissionResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Read ID, principals, and permission_level from plan using GetAttribute + var id, userName, groupName, servicePrincipalName, permissionLevel types.String + + resp.Diagnostics.Append(req.Plan.GetAttribute(ctx, path.Root("id"), &id)...) + resp.Diagnostics.Append(req.Plan.GetAttribute(ctx, path.Root("user_name"), &userName)...) + resp.Diagnostics.Append(req.Plan.GetAttribute(ctx, path.Root("group_name"), &groupName)...) + resp.Diagnostics.Append(req.Plan.GetAttribute(ctx, path.Root("service_principal_name"), &servicePrincipalName)...) + resp.Diagnostics.Append(req.Plan.GetAttribute(ctx, path.Root("permission_level"), &permissionLevel)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + w, err := r.Client.WorkspaceClient() + if err != nil { + resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) + return + } + + // Parse the ID to get object ID and principal + objectID, _, err := r.parseID(id.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Failed to parse resource ID", err.Error()) + return + } + + // Get the mapping from the ID + mapping, err := permissions.GetResourcePermissionsFromId(objectID) + if err != nil { + resp.Diagnostics.AddError("Failed to get resource permissions mapping", err.Error()) + return + } + + // Update the permission using PATCH + permLevel := iam.PermissionLevel(permissionLevel.ValueString()) + idParts := strings.Split(objectID, "/") + permID := idParts[len(idParts)-1] + + _, err = w.Permissions.Update(ctx, iam.UpdateObjectPermissions{ + RequestObjectId: permID, + RequestObjectType: mapping.GetRequestObjectType(), + AccessControlList: []iam.AccessControlRequest{ + { + UserName: userName.ValueString(), + GroupName: groupName.ValueString(), + ServicePrincipalName: servicePrincipalName.ValueString(), + PermissionLevel: permLevel, + }, + }, + }) + if err != nil { + resp.Diagnostics.AddError("Failed to update permission", err.Error()) + return + } + + // Read the object identifier field from current state to preserve it + var objectFieldValue types.String + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root(mapping.GetField()), &objectFieldValue)...) + if resp.Diagnostics.HasError() { + return + } + + // Update state using SetAttribute - preserve the object identifier field + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(mapping.GetField()), objectFieldValue)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("permission_level"), permissionLevel)...) 
+} + +func (r *PermissionResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Read ID from state using GetAttribute + var id types.String + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("id"), &id)...) + if resp.Diagnostics.HasError() { + return + } + + w, err := r.Client.WorkspaceClient() + if err != nil { + resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) + return + } + + // Parse the ID to get object ID and principal + objectID, principal, err := r.parseID(id.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Failed to parse resource ID", err.Error()) + return + } + + // Get the mapping from the ID + mapping, err := permissions.GetResourcePermissionsFromId(objectID) + if err != nil { + resp.Diagnostics.AddError("Failed to get resource permissions mapping", err.Error()) + return + } + + // Read current permissions to see what to remove + idParts := strings.Split(objectID, "/") + permID := idParts[len(idParts)-1] + + currentPerms, err := w.Permissions.Get(ctx, iam.GetPermissionRequest{ + RequestObjectId: permID, + RequestObjectType: mapping.GetRequestObjectType(), + }) + if err != nil { + resp.Diagnostics.AddError("Failed to read current permissions", err.Error()) + return + } + + // Build a list of all permissions EXCEPT the one we're deleting + var remainingACLs []iam.AccessControlRequest + for _, acl := range currentPerms.AccessControlList { + if !r.matchesPrincipal(acl, principal) { + // Keep this ACL + if len(acl.AllPermissions) > 0 { + remainingACLs = append(remainingACLs, iam.AccessControlRequest{ + UserName: acl.UserName, + GroupName: acl.GroupName, + ServicePrincipalName: acl.ServicePrincipalName, + PermissionLevel: acl.AllPermissions[0].PermissionLevel, + }) + } + } + } + + // Use Set to replace all permissions (effectively removing the specified principal) + _, err = w.Permissions.Set(ctx, iam.SetObjectPermissions{ + RequestObjectId: permID, + 
RequestObjectType: mapping.GetRequestObjectType(), + AccessControlList: remainingACLs, + }) + if err != nil { + resp.Diagnostics.AddError("Failed to delete permission", err.Error()) + return + } +} + +// Helper methods + +// AttributeGetter is an interface for types that can get attributes (Plan, Config, State) +type AttributeGetter interface { + GetAttribute(ctx context.Context, path path.Path, target interface{}) diag.Diagnostics +} + +// PermissionMapping is an interface that abstracts the permissions mapping operations +type PermissionMapping interface { + GetRequestObjectType() string + GetObjectType() string + GetID(context.Context, *databricks.WorkspaceClient, string) (string, error) +} + +func (r *PermissionResource) getObjectMappingAndID(ctx context.Context, w *databricks.WorkspaceClient, getter AttributeGetter) (PermissionMapping, string, string, types.String, error) { + // Dynamically iterate through all permission definitions to find which object ID is set + allPermissions := permissions.AllResourcePermissions() + + for _, mapping := range allPermissions { + var attrValue types.String + diags := getter.GetAttribute(ctx, path.Root(mapping.GetField()), &attrValue) + if diags.HasError() { + continue // Attribute doesn't exist or has errors, try next + } + + if !attrValue.IsNull() && !attrValue.IsUnknown() && attrValue.ValueString() != "" { + configuredValue := attrValue.ValueString() + + // Get the object ID (may involve path resolution) + objectID, err := mapping.GetID(ctx, w, configuredValue) + if err != nil { + return nil, "", "", types.String{}, err + } + + // Return mapping, objectID, field name, and field value + return mapping, objectID, mapping.GetField(), attrValue, nil + } + } + + // No object identifier was set + return nil, "", "", types.String{}, fmt.Errorf("at least one object identifier must be set") +} + +func (r *PermissionResource) matchesPrincipal(acl iam.AccessControlResponse, principal string) bool { + return acl.UserName == principal 
|| + acl.GroupName == principal || + acl.ServicePrincipalName == principal +} + +func (r *PermissionResource) parseID(id string) (objectID string, principal string, error error) { + // ID format: /<object_type>/<object_id>/<principal> + parts := strings.Split(id, "/") + if len(parts) < 4 { + return "", "", fmt.Errorf("invalid ID format: expected /<object_type>/<object_id>/<principal>, got %s", id) + } + + // Reconstruct object ID and get principal + principal = parts[len(parts)-1] + objectID = strings.Join(parts[:len(parts)-1], "/") + + return objectID, principal, nil +} diff --git a/internal/providers/pluginfw/products/permissions/resource_permission_test.go b/internal/providers/pluginfw/products/permissions/resource_permission_test.go new file mode 100644 index 0000000000..dccb1c691f --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/resource_permission_test.go @@ -0,0 +1,140 @@ +package permissions + +import ( + "context" + "testing" + + "github.com/databricks/terraform-provider-databricks/common" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPermissionResource_Schema(t *testing.T) { + r := ResourcePermission() + require.NotNil(t, r) + + var resp resource.SchemaResponse + r.Schema(context.Background(), resource.SchemaRequest{}, &resp) + + // Verify schema has required fields + require.NotNil(t, resp.Schema.Attributes) + + // Check principal fields + _, ok := resp.Schema.Attributes["user_name"] + assert.True(t, ok, "user_name should be in schema") + + _, ok = resp.Schema.Attributes["group_name"] + assert.True(t, ok, "group_name should be in schema") + + _, ok = resp.Schema.Attributes["service_principal_name"] + assert.True(t, ok, "service_principal_name should be in schema") + + // Check permission level + _, ok = resp.Schema.Attributes["permission_level"] + assert.True(t, ok, "permission_level should be in schema") + + // Check some object
identifiers + _, ok = resp.Schema.Attributes["cluster_id"] + assert.True(t, ok, "cluster_id should be in schema") + + _, ok = resp.Schema.Attributes["job_id"] + assert.True(t, ok, "job_id should be in schema") + + _, ok = resp.Schema.Attributes["authorization"] + assert.True(t, ok, "authorization should be in schema") + + // Check computed fields + idAttr, ok := resp.Schema.Attributes["id"] + assert.True(t, ok, "id should be in schema") + if stringAttr, ok := idAttr.(schema.StringAttribute); ok { + assert.True(t, stringAttr.Computed, "id should be computed") + } + + objectTypeAttr, ok := resp.Schema.Attributes["object_type"] + assert.True(t, ok, "object_type should be in schema") + if stringAttr, ok := objectTypeAttr.(schema.StringAttribute); ok { + assert.True(t, stringAttr.Computed, "object_type should be computed") + } +} + +func TestPermissionResource_Metadata(t *testing.T) { + r := ResourcePermission() + var resp resource.MetadataResponse + r.Metadata(context.Background(), resource.MetadataRequest{ + ProviderTypeName: "databricks", + }, &resp) + + assert.Equal(t, "databricks_permission", resp.TypeName) +} + +func TestPermissionResource_ParseID(t *testing.T) { + r := &PermissionResource{} + + tests := []struct { + name string + id string + wantObjectID string + wantPrincipal string + wantError bool + }{ + { + name: "cluster permission", + id: "/clusters/test-cluster-id/user@example.com", + wantObjectID: "/clusters/test-cluster-id", + wantPrincipal: "user@example.com", + wantError: false, + }, + { + name: "job permission", + id: "/jobs/123/test-group", + wantObjectID: "/jobs/123", + wantPrincipal: "test-group", + wantError: false, + }, + { + name: "authorization tokens", + id: "/authorization/tokens/developers", + wantObjectID: "/authorization/tokens", + wantPrincipal: "developers", + wantError: false, + }, + { + name: "invalid format - too few parts", + id: "/clusters/test-cluster-id", + wantError: true, + }, + { + name: "invalid format - no slashes", + id: 
"test-id", + wantError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotObjectID, gotPrincipal, err := r.parseID(tt.id) + if tt.wantError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.wantObjectID, gotObjectID) + assert.Equal(t, tt.wantPrincipal, gotPrincipal) + } + }) + } +} + +func TestPermissionResource_Configure(t *testing.T) { + r := &PermissionResource{} + client := &common.DatabricksClient{} + + var resp resource.ConfigureResponse + r.Configure(context.Background(), resource.ConfigureRequest{ + ProviderData: client, + }, &resp) + + assert.False(t, resp.Diagnostics.HasError()) + assert.Equal(t, client, r.Client) +} diff --git a/permissions/permission_definitions.go b/permissions/permission_definitions.go index a15ef91675..7fdeea6c7a 100644 --- a/permissions/permission_definitions.go +++ b/permissions/permission_definitions.go @@ -17,8 +17,8 @@ import ( "github.com/hashicorp/terraform-plugin-log/tflog" ) -// resourcePermissions captures all the information needed to manage permissions for a given object type. -type resourcePermissions struct { +// WorkspaceObjectPermissions captures all the information needed to manage permissions for a given object type. +type WorkspaceObjectPermissions struct { // Mandatory Fields // The attribute name that users configure with the ID of the object to manage @@ -77,8 +77,8 @@ type resourcePermissions struct { fetchObjectCreator func(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (string, error) } -// getAllowedPermissionLevels returns the list of permission levels that are allowed for this resource type. -func (p resourcePermissions) getAllowedPermissionLevels(includeNonManagementPermissions bool) []string { +// GetAllowedPermissionLevels returns the list of permission levels that are allowed for this resource type. 
+func (p WorkspaceObjectPermissions) GetAllowedPermissionLevels(includeNonManagementPermissions bool) []string { levels := make([]string, 0, len(p.allowedPermissionLevels)) for level := range p.allowedPermissionLevels { if includeNonManagementPermissions || p.allowedPermissionLevels[level].isManagementPermission { @@ -99,7 +99,7 @@ type resourceStatus struct { // getObjectStatus returns the creator of the object and whether the object exists. If the object creator cannot be determined for this // resource type, an empty string is returned. Resources without fetchObjectCreator are assumed to exist and have an unknown creator. -func (p resourcePermissions) getObjectStatus(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (resourceStatus, error) { +func (p WorkspaceObjectPermissions) getObjectStatus(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (resourceStatus, error) { if p.fetchObjectCreator != nil { creator, err := p.fetchObjectCreator(ctx, w, objectID) if err != nil { @@ -111,7 +111,7 @@ func (p resourcePermissions) getObjectStatus(ctx context.Context, w *databricks. } // getPathVariant returns the name of the path attribute for this resource type. -func (p resourcePermissions) getPathVariant() string { +func (p WorkspaceObjectPermissions) getPathVariant() string { if p.pathVariant != "" { return p.pathVariant } @@ -119,7 +119,7 @@ func (p resourcePermissions) getPathVariant() string { } // validate checks that the user is not trying to set permissions for the admin group or remove their own management permissions. -func (p resourcePermissions) validate(ctx context.Context, entity entity.PermissionsEntity, currentUsername string) error { +func (p WorkspaceObjectPermissions) validate(ctx context.Context, entity entity.PermissionsEntity, currentUsername string) error { for _, change := range entity.AccessControlList { // Prevent users from setting permissions for admins. 
if change.GroupName == "admins" && !p.allowConfiguringAdmins { @@ -128,7 +128,7 @@ func (p resourcePermissions) validate(ctx context.Context, entity entity.Permiss // Check that the user is preventing themselves from managing the object level := p.allowedPermissionLevels[string(change.PermissionLevel)] if (change.UserName == currentUsername || change.ServicePrincipalName == currentUsername) && !level.isManagementPermission { - allowedLevelsForCurrentUser := p.getAllowedPermissionLevels(false) + allowedLevelsForCurrentUser := p.GetAllowedPermissionLevels(false) return fmt.Errorf("cannot remove management permissions for the current user for %s, allowed levels: %s", p.objectType, strings.Join(allowedLevelsForCurrentUser, ", ")) } @@ -140,7 +140,7 @@ func (p resourcePermissions) validate(ctx context.Context, entity entity.Permiss } // getID returns the object ID for the given user-specified ID. -func (p resourcePermissions) getID(ctx context.Context, w *databricks.WorkspaceClient, id string) (string, error) { +func (p WorkspaceObjectPermissions) getID(ctx context.Context, w *databricks.WorkspaceClient, id string) (string, error) { var err error if p.idRetriever != nil { id, err = p.idRetriever(ctx, w, id) @@ -152,7 +152,7 @@ func (p resourcePermissions) getID(ctx context.Context, w *databricks.WorkspaceC } // prepareForUpdate prepares the access control list for an update request by calling all update customizers. 
-func (p resourcePermissions) prepareForUpdate(objectID string, e entity.PermissionsEntity, currentUser string) (entity.PermissionsEntity, error) { +func (p WorkspaceObjectPermissions) prepareForUpdate(objectID string, e entity.PermissionsEntity, currentUser string) (entity.PermissionsEntity, error) { cachedCurrentUser := func() (string, error) { return currentUser, nil } ctx := update.ACLCustomizerContext{ GetCurrentUser: cachedCurrentUser, @@ -169,7 +169,7 @@ func (p resourcePermissions) prepareForUpdate(objectID string, e entity.Permissi } // prepareForDelete prepares the access control list for a delete request by calling all delete customizers. -func (p resourcePermissions) prepareForDelete(objectACL *iam.ObjectPermissions, getCurrentUser func() (string, error)) ([]iam.AccessControlRequest, error) { +func (p WorkspaceObjectPermissions) prepareForDelete(objectACL *iam.ObjectPermissions, getCurrentUser func() (string, error)) ([]iam.AccessControlRequest, error) { accl := make([]iam.AccessControlRequest, 0, len(objectACL.AccessControlList)) // By default, only admins have access to a resource when databricks_permissions for that resource are deleted. for _, acl := range objectACL.AccessControlList { @@ -211,7 +211,7 @@ func (p resourcePermissions) prepareForDelete(objectACL *iam.ObjectPermissions, // For example, the SQL API previously used CAN_VIEW for read-only permission, but the GA API uses CAN_READ. Users may // have CAN_VIEW in their resource configuration, so the read customizer will rewrite the response from CAN_READ to // CAN_VIEW to match the user's configuration. 
-func (p resourcePermissions) prepareResponse(objectID string, objectACL *iam.ObjectPermissions, existing entity.PermissionsEntity, me string) (entity.PermissionsEntity, error) { +func (p WorkspaceObjectPermissions) prepareResponse(objectID string, objectACL *iam.ObjectPermissions, existing entity.PermissionsEntity, me string) (entity.PermissionsEntity, error) { ctx := read.ACLCustomizerContext{ GetId: func() string { return objectID }, GetExistingPermissionsEntity: func() entity.PermissionsEntity { return existing }, @@ -254,7 +254,7 @@ func (p resourcePermissions) prepareResponse(objectID string, objectACL *iam.Obj } // addOwnerPermissionIfNeeded adds the owner permission to the object ACL if the owner permission is allowed and not already set. -func (p resourcePermissions) addOwnerPermissionIfNeeded(objectACL []iam.AccessControlRequest, ownerOpt string) []iam.AccessControlRequest { +func (p WorkspaceObjectPermissions) addOwnerPermissionIfNeeded(objectACL []iam.AccessControlRequest, ownerOpt string) []iam.AccessControlRequest { _, ok := p.allowedPermissionLevels["IS_OWNER"] if !ok { return objectACL @@ -286,7 +286,8 @@ type permissionLevelOptions struct { deprecated string } -func getResourcePermissionsFromId(id string) (resourcePermissions, error) { +// GetResourcePermissionsFromId returns the resource permissions mapping for a given ID. +func GetResourcePermissionsFromId(id string) (WorkspaceObjectPermissions, error) { idParts := strings.Split(id, "/") objectType := strings.Join(idParts[1:len(idParts)-1], "/") for _, mapping := range allResourcePermissions() { @@ -300,11 +301,11 @@ func getResourcePermissionsFromId(id string) (resourcePermissions, error) { return mapping, nil } } - return resourcePermissions{}, fmt.Errorf("resource type for %s not found", id) + return WorkspaceObjectPermissions{}, fmt.Errorf("resource type for %s not found", id) } -// getResourcePermissionsFromState returns the resourcePermissions for the given state. 
-func getResourcePermissionsFromState(d interface{ GetOk(string) (any, bool) }) (resourcePermissions, string, error) { +// getResourcePermissionsFromState returns the WorkspaceObjectPermissions for the given state. +func getResourcePermissionsFromState(d interface{ GetOk(string) (any, bool) }) (WorkspaceObjectPermissions, string, error) { allPermissions := allResourcePermissions() for _, mapping := range allPermissions { if v, ok := d.GetOk(mapping.field); ok { @@ -325,12 +326,61 @@ func getResourcePermissionsFromState(d interface{ GetOk(string) (any, bool) }) ( allFields = append(allFields, mapping.field) } sort.Strings(allFields) - return resourcePermissions{}, "", fmt.Errorf("at least one type of resource identifier must be set; allowed fields: %s", strings.Join(allFields, ", ")) + return WorkspaceObjectPermissions{}, "", fmt.Errorf("at least one type of resource identifier must be set; allowed fields: %s", strings.Join(allFields, ", ")) } -// getResourcePermissionsForObjectAcl returns the resourcePermissions for the given ObjectAclApiResponse. -// allResourcePermissions is the list of all resource types that can be managed by the databricks_permissions resource. 
-func allResourcePermissions() []resourcePermissions { +// GetRequestObjectType returns the request object type for the permission mapping +func (p WorkspaceObjectPermissions) GetRequestObjectType() string { + return p.requestObjectType +} + +// GetObjectType returns the object type for the permission mapping +func (p WorkspaceObjectPermissions) GetObjectType() string { + return p.objectType +} + +// GetField returns the field name for the permission mapping +func (p WorkspaceObjectPermissions) GetField() string { + return p.field +} + +// GetID returns the object ID for the given user-specified ID +func (p WorkspaceObjectPermissions) GetID(ctx context.Context, w *databricks.WorkspaceClient, id string) (string, error) { + return p.getID(ctx, w, id) +} + +// GetResourcePermissionsFromFieldValue finds the appropriate resource permissions mapping +// by checking which field is set in the provided map +func GetResourcePermissionsFromFieldValue(fieldValues map[string]string) (WorkspaceObjectPermissions, string, error) { + allPermissions := allResourcePermissions() + for _, mapping := range allPermissions { + if val, ok := fieldValues[mapping.field]; ok && val != "" { + if mapping.stateMatcher != nil && !mapping.stateMatcher(val) { + continue + } + return mapping, val, nil + } + } + allFields := make([]string, 0, len(allPermissions)) + seen := make(map[string]struct{}) + for _, mapping := range allPermissions { + if _, ok := seen[mapping.field]; ok { + continue + } + seen[mapping.field] = struct{}{} + allFields = append(allFields, mapping.field) + } + sort.Strings(allFields) + return WorkspaceObjectPermissions{}, "", fmt.Errorf("at least one type of resource identifier must be set; allowed fields: %s", strings.Join(allFields, ", ")) +} + +// AllResourcePermissions returns all permission type definitions. +// Exported for use by Plugin Framework resources to dynamically generate schemas. 
+func AllResourcePermissions() []WorkspaceObjectPermissions { + return allResourcePermissions() +} + +func allResourcePermissions() []WorkspaceObjectPermissions { PATH := func(ctx context.Context, w *databricks.WorkspaceClient, path string) (string, error) { info, err := w.Workspace.GetStatusByPath(ctx, path) if err != nil { @@ -344,7 +394,7 @@ func allResourcePermissions() []resourcePermissions { rewriteCanReadToCanView := read.RewritePermissions(map[iam.PermissionLevel]iam.PermissionLevel{ iam.PermissionLevelCanRead: iam.PermissionLevelCanView, }) - return []resourcePermissions{ + return []WorkspaceObjectPermissions{ { field: "cluster_policy_id", objectType: "cluster-policy", diff --git a/permissions/resource_permission_acc_test.go b/permissions/resource_permission_acc_test.go new file mode 100644 index 0000000000..aa321f225c --- /dev/null +++ b/permissions/resource_permission_acc_test.go @@ -0,0 +1,518 @@ +package permissions_test + +import ( + "context" + "fmt" + "testing" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/terraform-provider-databricks/internal/acceptance" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stretchr/testify/assert" +) + +func TestAccPermission_Cluster(t *testing.T) { + acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace") + clusterTemplate := ` +data "databricks_spark_version" "latest" { +} + +resource "databricks_cluster" "this" { + cluster_name = "singlenode-{var.RANDOM}" + spark_version = data.databricks_spark_version.latest.id + instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" + num_workers = 0 + autotermination_minutes = 10 + spark_conf = { + "spark.databricks.cluster.profile" = "singleNode" + "spark.master" = "local[*]" + } + custom_tags = { + "ResourceClass" = "SingleNode" + } +} + +resource "databricks_group" "group1" { + display_name = 
"permission-group1-{var.RANDOM}" +} + +resource "databricks_group" "group2" { + display_name = "permission-group2-{var.RANDOM}" +} + +resource "databricks_permission" "cluster_group1" { + cluster_id = databricks_cluster.this.id + group_name = databricks_group.group1.display_name + permission_level = "CAN_ATTACH_TO" +} + +resource "databricks_permission" "cluster_group2" { + cluster_id = databricks_cluster.this.id + group_name = databricks_group.group2.display_name + permission_level = "CAN_RESTART" +} +` + + acceptance.WorkspaceLevel(t, acceptance.Step{ + Template: clusterTemplate, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("databricks_permission.cluster_group1", "permission_level", "CAN_ATTACH_TO"), + resource.TestCheckResourceAttr("databricks_permission.cluster_group2", "permission_level", "CAN_RESTART"), + func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + clusterId := s.RootModule().Resources["databricks_cluster.this"].Primary.ID + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "clusters", clusterId) + assert.NoError(t, err) + + group1Name := s.RootModule().Resources["databricks_group.group1"].Primary.Attributes["display_name"] + group2Name := s.RootModule().Resources["databricks_group.group2"].Primary.Attributes["display_name"] + + // Verify both permissions exist + foundGroup1 := false + foundGroup2 := false + for _, acl := range permissions.AccessControlList { + if acl.GroupName == group1Name { + assert.Equal(t, iam.PermissionLevelCanAttachTo, acl.AllPermissions[0].PermissionLevel) + foundGroup1 = true + } + if acl.GroupName == group2Name { + assert.Equal(t, iam.PermissionLevelCanRestart, acl.AllPermissions[0].PermissionLevel) + foundGroup2 = true + } + } + assert.True(t, foundGroup1, "Group1 permission not found") + assert.True(t, foundGroup2, "Group2 permission not found") + return nil + }, + ), + }) +} + +func TestAccPermission_Job(t 
*testing.T) { + acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace") + template := ` +resource "databricks_job" "this" { + name = "permission-job-{var.RANDOM}" +} + +resource "databricks_group" "viewers" { + display_name = "permission-viewers-{var.RANDOM}" +} + +resource "databricks_user" "test_user" { + user_name = "permission-test-{var.RANDOM}@example.com" + force = true +} + +resource "databricks_permission" "job_group" { + job_id = databricks_job.this.id + group_name = databricks_group.viewers.display_name + permission_level = "CAN_VIEW" +} + +resource "databricks_permission" "job_user" { + job_id = databricks_job.this.id + user_name = databricks_user.test_user.user_name + permission_level = "CAN_MANAGE_RUN" +} +` + + acceptance.WorkspaceLevel(t, acceptance.Step{ + Template: template, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("databricks_permission.job_group", "permission_level", "CAN_VIEW"), + resource.TestCheckResourceAttr("databricks_permission.job_user", "permission_level", "CAN_MANAGE_RUN"), + resource.TestCheckResourceAttr("databricks_permission.job_group", "object_type", "jobs"), + ), + }) +} + +func TestAccPermission_Notebook(t *testing.T) { + acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace") + template := ` +resource "databricks_directory" "this" { + path = "/permission-test-{var.RANDOM}" +} + +resource "databricks_notebook" "this" { + source = "{var.CWD}/../storage/testdata/tf-test-python.py" + path = "${databricks_directory.this.path}/test_notebook" +} + +resource "databricks_group" "editors" { + display_name = "permission-editors-{var.RANDOM}" +} + +resource "databricks_group" "runners" { + display_name = "permission-runners-{var.RANDOM}" +} + +resource "databricks_permission" "notebook_editors" { + notebook_path = databricks_notebook.this.path + group_name = databricks_group.editors.display_name + permission_level = "CAN_EDIT" +} + +resource "databricks_permission" "notebook_runners" { + notebook_path = 
databricks_notebook.this.path + group_name = databricks_group.runners.display_name + permission_level = "CAN_RUN" +} +` + + acceptance.WorkspaceLevel(t, acceptance.Step{ + Template: template, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("databricks_permission.notebook_editors", "permission_level", "CAN_EDIT"), + resource.TestCheckResourceAttr("databricks_permission.notebook_runners", "permission_level", "CAN_RUN"), + ), + }) +} + +func TestAccPermission_Authorization_Tokens(t *testing.T) { + acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace") + template := ` +resource "databricks_group" "team_a" { + display_name = "permission-team-a-{var.RANDOM}" +} + +resource "databricks_group" "team_b" { + display_name = "permission-team-b-{var.RANDOM}" +} + +resource "databricks_group" "team_c" { + display_name = "permission-team-c-{var.RANDOM}" +} + +# This demonstrates the key benefit: each team's token permissions +# can be managed independently, unlike databricks_permissions +resource "databricks_permission" "tokens_team_a" { + authorization = "tokens" + group_name = databricks_group.team_a.display_name + permission_level = "CAN_USE" +} + +resource "databricks_permission" "tokens_team_b" { + authorization = "tokens" + group_name = databricks_group.team_b.display_name + permission_level = "CAN_USE" +} + +resource "databricks_permission" "tokens_team_c" { + authorization = "tokens" + group_name = databricks_group.team_c.display_name + permission_level = "CAN_USE" +} +` + + acceptance.WorkspaceLevel(t, acceptance.Step{ + Template: template, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("databricks_permission.tokens_team_a", "authorization", "tokens"), + resource.TestCheckResourceAttr("databricks_permission.tokens_team_a", "permission_level", "CAN_USE"), + resource.TestCheckResourceAttr("databricks_permission.tokens_team_b", "authorization", "tokens"), + resource.TestCheckResourceAttr("databricks_permission.tokens_team_c", 
"authorization", "tokens"), + func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "authorization", "tokens") + assert.NoError(t, err) + + teamA := s.RootModule().Resources["databricks_group.team_a"].Primary.Attributes["display_name"] + teamB := s.RootModule().Resources["databricks_group.team_b"].Primary.Attributes["display_name"] + teamC := s.RootModule().Resources["databricks_group.team_c"].Primary.Attributes["display_name"] + + foundA, foundB, foundC := false, false, false + for _, acl := range permissions.AccessControlList { + if acl.GroupName == teamA { + foundA = true + } + if acl.GroupName == teamB { + foundB = true + } + if acl.GroupName == teamC { + foundC = true + } + } + assert.True(t, foundA, "Team A permission not found") + assert.True(t, foundB, "Team B permission not found") + assert.True(t, foundC, "Team C permission not found") + return nil + }, + ), + }, acceptance.Step{ + // Update one permission independently - remove team_b + Template: ` +resource "databricks_group" "team_a" { + display_name = "permission-team-a-{var.RANDOM}" +} + +resource "databricks_group" "team_b" { + display_name = "permission-team-b-{var.RANDOM}" +} + +resource "databricks_group" "team_c" { + display_name = "permission-team-c-{var.RANDOM}" +} + +resource "databricks_permission" "tokens_team_a" { + authorization = "tokens" + group_name = databricks_group.team_a.display_name + permission_level = "CAN_USE" +} + +resource "databricks_permission" "tokens_team_c" { + authorization = "tokens" + group_name = databricks_group.team_c.display_name + permission_level = "CAN_USE" +} +`, + Check: func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "authorization", "tokens") + assert.NoError(t, err) + + teamA := 
s.RootModule().Resources["databricks_group.team_a"].Primary.Attributes["display_name"] + teamB := s.RootModule().Resources["databricks_group.team_b"].Primary.Attributes["display_name"] + teamC := s.RootModule().Resources["databricks_group.team_c"].Primary.Attributes["display_name"] + + foundA, foundB, foundC := false, false, false + for _, acl := range permissions.AccessControlList { + if acl.GroupName == teamA { + foundA = true + } + if acl.GroupName == teamB { + foundB = true + } + if acl.GroupName == teamC { + foundC = true + } + } + assert.True(t, foundA, "Team A permission should still exist") + assert.False(t, foundB, "Team B permission should be removed") + assert.True(t, foundC, "Team C permission should still exist") + return nil + }, + }) +} + +func TestAccPermission_Update(t *testing.T) { + acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace") + template1 := ` +resource "databricks_job" "this" { + name = "permission-update-{var.RANDOM}" +} + +resource "databricks_group" "test" { + display_name = "permission-update-{var.RANDOM}" +} + +resource "databricks_permission" "job_group" { + job_id = databricks_job.this.id + group_name = databricks_group.test.display_name + permission_level = "CAN_VIEW" +} +` + + template2 := ` +resource "databricks_job" "this" { + name = "permission-update-{var.RANDOM}" +} + +resource "databricks_group" "test" { + display_name = "permission-update-{var.RANDOM}" +} + +resource "databricks_permission" "job_group" { + job_id = databricks_job.this.id + group_name = databricks_group.test.display_name + permission_level = "CAN_MANAGE" +} +` + + acceptance.WorkspaceLevel(t, acceptance.Step{ + Template: template1, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("databricks_permission.job_group", "permission_level", "CAN_VIEW"), + ), + }, acceptance.Step{ + Template: template2, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("databricks_permission.job_group", "permission_level", 
"CAN_MANAGE"), + func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + jobId := s.RootModule().Resources["databricks_job.this"].Primary.ID + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "jobs", jobId) + assert.NoError(t, err) + + groupName := s.RootModule().Resources["databricks_group.test"].Primary.Attributes["display_name"] + for _, acl := range permissions.AccessControlList { + if acl.GroupName == groupName { + assert.Equal(t, iam.PermissionLevelCanManage, acl.AllPermissions[0].PermissionLevel) + return nil + } + } + return fmt.Errorf("permission not found for group %s", groupName) + }, + ), + }) +} + +func TestAccPermission_ServicePrincipal(t *testing.T) { + acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace") + if acceptance.IsGcp(t) { + acceptance.Skipf(t)("Service principals have different behavior on GCP") + } + + template := ` +resource "databricks_job" "this" { + name = "permission-sp-{var.RANDOM}" +} + +resource "databricks_service_principal" "sp" { + display_name = "permission-sp-{var.RANDOM}" +} + +resource "databricks_permission" "job_sp" { + job_id = databricks_job.this.id + service_principal_name = databricks_service_principal.sp.application_id + permission_level = "CAN_MANAGE" +} +` + + acceptance.WorkspaceLevel(t, acceptance.Step{ + Template: template, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("databricks_permission.job_sp", "permission_level", "CAN_MANAGE"), + resource.TestCheckResourceAttrSet("databricks_permission.job_sp", "service_principal_name"), + ), + }) +} + +func TestAccPermission_Import(t *testing.T) { + acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace") + template := ` +resource "databricks_job" "this" { + name = "permission-import-{var.RANDOM}" +} + +resource "databricks_group" "test" { + display_name = "permission-import-{var.RANDOM}" +} + +resource "databricks_permission" "job_group" { + job_id = 
databricks_job.this.id + group_name = databricks_group.test.display_name + permission_level = "CAN_VIEW" +} +` + + acceptance.WorkspaceLevel(t, acceptance.Step{ + Template: template, + }, acceptance.Step{ + Template: template, + ResourceName: "databricks_permission.job_group", + ImportState: true, + ImportStateVerify: true, + }) +} + +func TestAccPermission_SqlEndpoint(t *testing.T) { + acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace") + template := ` +resource "databricks_sql_endpoint" "this" { + name = "permission-sql-{var.RANDOM}" + cluster_size = "2X-Small" + tags { + custom_tags { + key = "Owner" + value = "eng-dev-ecosystem-team_at_databricks.com" + } + } +} + +resource "databricks_group" "sql_users" { + display_name = "permission-sql-users-{var.RANDOM}" +} + +resource "databricks_permission" "warehouse_users" { + sql_endpoint_id = databricks_sql_endpoint.this.id + group_name = databricks_group.sql_users.display_name + permission_level = "CAN_USE" +} +` + + acceptance.WorkspaceLevel(t, acceptance.Step{ + Template: template, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("databricks_permission.warehouse_users", "permission_level", "CAN_USE"), + ), + }) +} + +func TestAccPermission_InstancePool(t *testing.T) { + acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace") + template := ` +data "databricks_node_type" "smallest" { + local_disk = true +} + +resource "databricks_instance_pool" "this" { + instance_pool_name = "permission-pool-{var.RANDOM}" + min_idle_instances = 0 + max_capacity = 1 + node_type_id = data.databricks_node_type.smallest.id + idle_instance_autotermination_minutes = 10 +} + +resource "databricks_group" "pool_users" { + display_name = "permission-pool-users-{var.RANDOM}" +} + +resource "databricks_permission" "pool_access" { + instance_pool_id = databricks_instance_pool.this.id + group_name = databricks_group.pool_users.display_name + permission_level = "CAN_ATTACH_TO" +} +` + + acceptance.WorkspaceLevel(t, 
acceptance.Step{ + Template: template, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("databricks_permission.pool_access", "permission_level", "CAN_ATTACH_TO"), + ), + }) +} + +func TestAccPermission_ClusterPolicy(t *testing.T) { + acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace") + template := ` +resource "databricks_cluster_policy" "this" { + name = "permission-policy-{var.RANDOM}" + definition = jsonencode({ + "spark_conf.spark.hadoop.javax.jdo.option.ConnectionURL": { + "type": "fixed", + "value": "jdbc:sqlserver://" + } + }) +} + +resource "databricks_group" "policy_users" { + display_name = "permission-policy-users-{var.RANDOM}" +} + +resource "databricks_permission" "policy_access" { + cluster_policy_id = databricks_cluster_policy.this.id + group_name = databricks_group.policy_users.display_name + permission_level = "CAN_USE" +} +` + + acceptance.WorkspaceLevel(t, acceptance.Step{ + Template: template, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("databricks_permission.policy_access", "permission_level", "CAN_USE"), + ), + }) +} diff --git a/permissions/resource_permissions.go b/permissions/resource_permissions.go index 4655cc6f89..30046fa816 100644 --- a/permissions/resource_permissions.go +++ b/permissions/resource_permissions.go @@ -30,7 +30,7 @@ type PermissionsAPI struct { } // safePutWithOwner is a workaround for the limitation where warehouse without owners cannot have IS_OWNER set -func (a PermissionsAPI) safePutWithOwner(objectID string, objectACL []iam.AccessControlRequest, mapping resourcePermissions, ownerOpt string) error { +func (a PermissionsAPI) safePutWithOwner(objectID string, objectACL []iam.AccessControlRequest, mapping WorkspaceObjectPermissions, ownerOpt string) error { w, err := a.client.WorkspaceClient() if err != nil { return err @@ -69,7 +69,7 @@ func (a PermissionsAPI) getCurrentUser() (string, error) { } // Update updates object permissions. 
Technically, it's using method named SetOrDelete, but here we do more -func (a PermissionsAPI) Update(objectID string, entity entity.PermissionsEntity, mapping resourcePermissions) error { +func (a PermissionsAPI) Update(objectID string, entity entity.PermissionsEntity, mapping WorkspaceObjectPermissions) error { currentUser, err := a.getCurrentUser() if err != nil { return err @@ -91,7 +91,7 @@ func (a PermissionsAPI) Update(objectID string, entity entity.PermissionsEntity, // Delete gracefully removes permissions of non-admin users. After this operation, the object is managed // by the current user and admin group. If the resource has IS_OWNER permissions, they are reset to the // object creator, if it can be determined. -func (a PermissionsAPI) Delete(objectID string, mapping resourcePermissions) error { +func (a PermissionsAPI) Delete(objectID string, mapping WorkspaceObjectPermissions) error { if mapping.objectType == "pipelines" { // There is a bug which causes the code below send IS_OWNER with run_as identity // Which is of course wrong thing to do. 
@@ -122,7 +122,7 @@ func (a PermissionsAPI) Delete(objectID string, mapping resourcePermissions) err return a.safePutWithOwner(objectID, accl, mapping, resourceStatus.creator) } -func (a PermissionsAPI) readRaw(objectID string, mapping resourcePermissions) (*iam.ObjectPermissions, error) { +func (a PermissionsAPI) readRaw(objectID string, mapping WorkspaceObjectPermissions) (*iam.ObjectPermissions, error) { w, err := a.client.WorkspaceClient() if err != nil { return nil, err @@ -156,7 +156,7 @@ func (a PermissionsAPI) readRaw(objectID string, mapping resourcePermissions) (* } // Read gets all relevant permissions for the object, including inherited ones -func (a PermissionsAPI) Read(objectID string, mapping resourcePermissions, existing entity.PermissionsEntity, me string) (entity.PermissionsEntity, error) { +func (a PermissionsAPI) Read(objectID string, mapping WorkspaceObjectPermissions, existing entity.PermissionsEntity, me string) (entity.PermissionsEntity, error) { permissions, err := a.readRaw(objectID, mapping) if err != nil { return entity.PermissionsEntity{}, err @@ -164,7 +164,7 @@ func (a PermissionsAPI) Read(objectID string, mapping resourcePermissions, exist return mapping.prepareResponse(objectID, permissions, existing, me) } -// ResourcePermissions definition +// ResourcePermissions is the SDKv2 resource definition for databricks_permissions func ResourcePermissions() common.Resource { s := common.StructToSchema(entity.PermissionsEntity{}, func(s map[string]*schema.Schema) map[string]*schema.Schema { for _, mapping := range allResourcePermissions() { @@ -231,14 +231,14 @@ func ResourcePermissions() common.Resource { // is not aware of. 
if _, ok := mapping.allowedPermissionLevels[string(permissionLevel)]; !ok { return fmt.Errorf(`permission_level %s is not supported with %s objects; allowed levels: %s`, - permissionLevel, mapping.field, strings.Join(mapping.getAllowedPermissionLevels(true), ", ")) + permissionLevel, mapping.field, strings.Join(mapping.GetAllowedPermissionLevels(true), ", ")) } } return nil }, Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { a := NewPermissionsAPI(ctx, c) - mapping, err := getResourcePermissionsFromId(d.Id()) + mapping, err := GetResourcePermissionsFromId(d.Id()) if err != nil { return err } @@ -293,14 +293,14 @@ func ResourcePermissions() common.Resource { Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { var entity entity.PermissionsEntity common.DataToStructPointer(d, s, &entity) - mapping, err := getResourcePermissionsFromId(d.Id()) + mapping, err := GetResourcePermissionsFromId(d.Id()) if err != nil { return err } return NewPermissionsAPI(ctx, c).Update(d.Id(), entity, mapping) }, Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - mapping, err := getResourcePermissionsFromId(d.Id()) + mapping, err := GetResourcePermissionsFromId(d.Id()) if err != nil { return err } diff --git a/permissions/resource_permissions_test.go b/permissions/resource_permissions_test.go index ffcc1923fc..08fc3e5be5 100644 --- a/permissions/resource_permissions_test.go +++ b/permissions/resource_permissions_test.go @@ -1329,7 +1329,7 @@ func TestResourcePermissionsUpdate(t *testing.T) { assert.Equal(t, "CAN_VIEW", firstElem["permission_level"]) } -func getResourcePermissions(field, objectType string) resourcePermissions { +func getResourcePermissions(field, objectType string) WorkspaceObjectPermissions { for _, mapping := range allResourcePermissions() { if mapping.field == field && mapping.objectType == objectType { return mapping From 
d7e9b53ab573ea7e45ec94a15b07d7aba486b51d Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 29 Oct 2025 12:00:58 +0100 Subject: [PATCH 2/5] remove unneeded code --- .../products/permissions/object_id_helpers.go | 91 --------- .../permissions/object_id_helpers_test.go | 180 ------------------ 2 files changed, 271 deletions(-) delete mode 100644 internal/providers/pluginfw/products/permissions/object_id_helpers.go delete mode 100644 internal/providers/pluginfw/products/permissions/object_id_helpers_test.go diff --git a/internal/providers/pluginfw/products/permissions/object_id_helpers.go b/internal/providers/pluginfw/products/permissions/object_id_helpers.go deleted file mode 100644 index 06fefa9f7b..0000000000 --- a/internal/providers/pluginfw/products/permissions/object_id_helpers.go +++ /dev/null @@ -1,91 +0,0 @@ -package permissions - -import ( - "context" - "maps" - "slices" - - "github.com/hashicorp/terraform-plugin-framework/tfsdk" - "github.com/hashicorp/terraform-plugin-framework/types" -) - -// ObjectIdentifiers is a struct containing all object identifier fields -// This is used by both PermissionResourceModel and the validator -type ObjectIdentifiers struct { - ClusterId types.String `tfsdk:"cluster_id"` - ClusterPolicyId types.String `tfsdk:"cluster_policy_id"` - InstancePoolId types.String `tfsdk:"instance_pool_id"` - JobId types.String `tfsdk:"job_id"` - PipelineId types.String `tfsdk:"pipeline_id"` - NotebookId types.String `tfsdk:"notebook_id"` - NotebookPath types.String `tfsdk:"notebook_path"` - DirectoryId types.String `tfsdk:"directory_id"` - DirectoryPath types.String `tfsdk:"directory_path"` - WorkspaceFileId types.String `tfsdk:"workspace_file_id"` - WorkspaceFilePath types.String `tfsdk:"workspace_file_path"` - RegisteredModelId types.String `tfsdk:"registered_model_id"` - ExperimentId types.String `tfsdk:"experiment_id"` - SqlDashboardId types.String `tfsdk:"sql_dashboard_id"` - SqlEndpointId types.String `tfsdk:"sql_endpoint_id"` - 
SqlQueryId types.String `tfsdk:"sql_query_id"` - SqlAlertId types.String `tfsdk:"sql_alert_id"` - DashboardId types.String `tfsdk:"dashboard_id"` - RepoId types.String `tfsdk:"repo_id"` - RepoPath types.String `tfsdk:"repo_path"` - Authorization types.String `tfsdk:"authorization"` - ServingEndpointId types.String `tfsdk:"serving_endpoint_id"` - VectorSearchEndpointId types.String `tfsdk:"vector_search_endpoint_id"` - AppName types.String `tfsdk:"app_name"` - DatabaseInstanceName types.String `tfsdk:"database_instance_name"` - AlertV2Id types.String `tfsdk:"alert_v2_id"` -} - -// ToFieldValuesMap converts the ObjectIdentifiers struct to a map of field names to values -func (o *ObjectIdentifiers) ToFieldValuesMap() map[string]string { - return map[string]string{ - "cluster_id": o.ClusterId.ValueString(), - "cluster_policy_id": o.ClusterPolicyId.ValueString(), - "instance_pool_id": o.InstancePoolId.ValueString(), - "job_id": o.JobId.ValueString(), - "pipeline_id": o.PipelineId.ValueString(), - "notebook_id": o.NotebookId.ValueString(), - "notebook_path": o.NotebookPath.ValueString(), - "directory_id": o.DirectoryId.ValueString(), - "directory_path": o.DirectoryPath.ValueString(), - "workspace_file_id": o.WorkspaceFileId.ValueString(), - "workspace_file_path": o.WorkspaceFilePath.ValueString(), - "registered_model_id": o.RegisteredModelId.ValueString(), - "experiment_id": o.ExperimentId.ValueString(), - "sql_dashboard_id": o.SqlDashboardId.ValueString(), - "sql_endpoint_id": o.SqlEndpointId.ValueString(), - "sql_query_id": o.SqlQueryId.ValueString(), - "sql_alert_id": o.SqlAlertId.ValueString(), - "dashboard_id": o.DashboardId.ValueString(), - "repo_id": o.RepoId.ValueString(), - "repo_path": o.RepoPath.ValueString(), - "authorization": o.Authorization.ValueString(), - "serving_endpoint_id": o.ServingEndpointId.ValueString(), - "vector_search_endpoint_id": o.VectorSearchEndpointId.ValueString(), - "app_name": o.AppName.ValueString(), - "database_instance_name": 
o.DatabaseInstanceName.ValueString(), - "alert_v2_id": o.AlertV2Id.ValueString(), - } -} - -// GetObjectIdentifierFields returns all possible object identifier field names -// This is derived from the keys of ToFieldValuesMap() to maintain single source of truth -func GetObjectIdentifierFields() []string { - var empty ObjectIdentifiers - return slices.Collect(maps.Keys(empty.ToFieldValuesMap())) -} - -// ExtractObjectIdentifiersFromConfig reads object identifiers from a tfsdk.Config -// Returns nil if there are errors reading the config -func ExtractObjectIdentifiersFromConfig(ctx context.Context, config tfsdk.Config) *ObjectIdentifiers { - var objectIds ObjectIdentifiers - diags := config.Get(ctx, &objectIds) - if diags.HasError() { - return nil - } - return &objectIds -} diff --git a/internal/providers/pluginfw/products/permissions/object_id_helpers_test.go b/internal/providers/pluginfw/products/permissions/object_id_helpers_test.go deleted file mode 100644 index b41b2392ac..0000000000 --- a/internal/providers/pluginfw/products/permissions/object_id_helpers_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package permissions - -import ( - "testing" - - "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/stretchr/testify/assert" -) - -func TestObjectIdentifiers_ToFieldValuesMap(t *testing.T) { - tests := []struct { - name string - objIds ObjectIdentifiers - expected map[string]string - }{ - { - name: "cluster_id set", - objIds: ObjectIdentifiers{ - ClusterId: types.StringValue("cluster-123"), - }, - expected: map[string]string{ - "cluster_id": "cluster-123", - // All other fields should be empty strings - "cluster_policy_id": "", - "instance_pool_id": "", - "job_id": "", - "pipeline_id": "", - "notebook_id": "", - "notebook_path": "", - "directory_id": "", - "directory_path": "", - "workspace_file_id": "", - "workspace_file_path": "", - "registered_model_id": "", - "experiment_id": "", - "sql_dashboard_id": "", - "sql_endpoint_id": "", - "sql_query_id": 
"", - "sql_alert_id": "", - "dashboard_id": "", - "repo_id": "", - "repo_path": "", - "authorization": "", - "serving_endpoint_id": "", - "vector_search_endpoint_id": "", - "app_name": "", - "database_instance_name": "", - "alert_v2_id": "", - }, - }, - { - name: "multiple fields set", - objIds: ObjectIdentifiers{ - JobId: types.StringValue("job-456"), - Authorization: types.StringValue("tokens"), - NotebookPath: types.StringValue("/Users/test/notebook"), - }, - expected: map[string]string{ - "cluster_id": "", - "cluster_policy_id": "", - "instance_pool_id": "", - "job_id": "job-456", - "pipeline_id": "", - "notebook_id": "", - "notebook_path": "/Users/test/notebook", - "directory_id": "", - "directory_path": "", - "workspace_file_id": "", - "workspace_file_path": "", - "registered_model_id": "", - "experiment_id": "", - "sql_dashboard_id": "", - "sql_endpoint_id": "", - "sql_query_id": "", - "sql_alert_id": "", - "dashboard_id": "", - "repo_id": "", - "repo_path": "", - "authorization": "tokens", - "serving_endpoint_id": "", - "vector_search_endpoint_id": "", - "app_name": "", - "database_instance_name": "", - "alert_v2_id": "", - }, - }, - { - name: "all fields empty", - objIds: ObjectIdentifiers{}, - expected: map[string]string{ - "cluster_id": "", - "cluster_policy_id": "", - "instance_pool_id": "", - "job_id": "", - "pipeline_id": "", - "notebook_id": "", - "notebook_path": "", - "directory_id": "", - "directory_path": "", - "workspace_file_id": "", - "workspace_file_path": "", - "registered_model_id": "", - "experiment_id": "", - "sql_dashboard_id": "", - "sql_endpoint_id": "", - "sql_query_id": "", - "sql_alert_id": "", - "dashboard_id": "", - "repo_id": "", - "repo_path": "", - "authorization": "", - "serving_endpoint_id": "", - "vector_search_endpoint_id": "", - "app_name": "", - "database_instance_name": "", - "alert_v2_id": "", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := tt.objIds.ToFieldValuesMap() - - 
// Check that all expected keys are present - assert.Len(t, result, len(tt.expected), "Map should have correct number of entries") - - // Check specific values we care about - for key, expectedValue := range tt.expected { - assert.Contains(t, result, key, "Map should contain key: %s", key) - assert.Equal(t, expectedValue, result[key], "Value for key %s should match", key) - } - }) - } -} - -func TestGetObjectIdentifierFields(t *testing.T) { - // Verify that GetObjectIdentifierFields returns all expected fields - expectedFields := []string{ - "cluster_id", - "cluster_policy_id", - "instance_pool_id", - "job_id", - "pipeline_id", - "notebook_id", - "notebook_path", - "directory_id", - "directory_path", - "workspace_file_id", - "workspace_file_path", - "registered_model_id", - "experiment_id", - "sql_dashboard_id", - "sql_endpoint_id", - "sql_query_id", - "sql_alert_id", - "dashboard_id", - "repo_id", - "repo_path", - "authorization", - "serving_endpoint_id", - "vector_search_endpoint_id", - "app_name", - "database_instance_name", - "alert_v2_id", - } - - fields := GetObjectIdentifierFields() - assert.Len(t, fields, len(expectedFields), "Should have all object identifier fields") - - // Check that all expected fields are present - fieldMap := make(map[string]bool) - for _, field := range fields { - fieldMap[field] = true - } - - for _, expected := range expectedFields { - assert.True(t, fieldMap[expected], "GetObjectIdentifierFields() should contain: %s", expected) - } -} From d1a5c7bae58be34437b2edbf8b02c8d7d6c3e58d Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 29 Oct 2025 12:13:51 +0100 Subject: [PATCH 3/5] Fix race condition when deleting individual permissions --- .../products/permissions/object_mutex.go | 62 +++++++ .../products/permissions/object_mutex_test.go | 167 ++++++++++++++++++ .../permissions/resource_permission.go | 14 ++ 3 files changed, 243 insertions(+) create mode 100644 internal/providers/pluginfw/products/permissions/object_mutex.go create 
mode 100644 internal/providers/pluginfw/products/permissions/object_mutex_test.go diff --git a/internal/providers/pluginfw/products/permissions/object_mutex.go b/internal/providers/pluginfw/products/permissions/object_mutex.go new file mode 100644 index 0000000000..06a06ceba4 --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/object_mutex.go @@ -0,0 +1,62 @@ +package permissions + +import ( + "sync" +) + +// objectMutexManager manages mutexes per object ID to prevent concurrent +// operations on the same Databricks object that could lead to race conditions. +// +// This is particularly important for Delete operations where multiple +// databricks_permission resources for the same object might be deleted +// concurrently, each doing GET -> filter -> SET, which could result in +// lost permission updates. +type objectMutexManager struct { + mutexes map[string]*sync.Mutex + mapLock sync.Mutex +} + +// globalObjectMutexManager is the singleton instance used by all permission resources +var globalObjectMutexManager = &objectMutexManager{ + mutexes: make(map[string]*sync.Mutex), +} + +// Lock acquires a mutex for the given object ID. +// Each object ID gets its own mutex to allow concurrent operations on different objects +// while serializing operations on the same object. +func (m *objectMutexManager) Lock(objectID string) { + m.mapLock.Lock() + mu, exists := m.mutexes[objectID] + if !exists { + mu = &sync.Mutex{} + m.mutexes[objectID] = mu + } + m.mapLock.Unlock() + + // Lock the object-specific mutex (outside the map lock to avoid deadlock) + mu.Lock() +} + +// Unlock releases the mutex for the given object ID. +func (m *objectMutexManager) Unlock(objectID string) { + m.mapLock.Lock() + mu, exists := m.mutexes[objectID] + m.mapLock.Unlock() + + if exists { + mu.Unlock() + } +} + +// lockObject acquires a lock for the given object ID. +// This should be called at the start of any operation that modifies permissions. 
+func lockObject(objectID string) { + globalObjectMutexManager.Lock(objectID) +} + +// unlockObject releases the lock for the given object ID. +// This should be called at the end of any operation that modifies permissions. +// Use defer to ensure it's always called even if the operation panics. +func unlockObject(objectID string) { + globalObjectMutexManager.Unlock(objectID) +} diff --git a/internal/providers/pluginfw/products/permissions/object_mutex_test.go b/internal/providers/pluginfw/products/permissions/object_mutex_test.go new file mode 100644 index 0000000000..cc58bab7a0 --- /dev/null +++ b/internal/providers/pluginfw/products/permissions/object_mutex_test.go @@ -0,0 +1,167 @@ +package permissions + +import ( + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestObjectMutexManager_LockUnlock(t *testing.T) { + manager := &objectMutexManager{ + mutexes: make(map[string]*sync.Mutex), + } + + objectID := "/clusters/test-cluster" + + // First lock should succeed immediately + manager.Lock(objectID) + + // Verify mutex was created + manager.mapLock.Lock() + _, exists := manager.mutexes[objectID] + manager.mapLock.Unlock() + assert.True(t, exists, "Mutex should be created for object ID") + + // Unlock + manager.Unlock(objectID) +} + +func TestObjectMutexManager_ConcurrentAccess(t *testing.T) { + manager := &objectMutexManager{ + mutexes: make(map[string]*sync.Mutex), + } + + objectID := "/clusters/test-cluster" + var counter int32 + var wg sync.WaitGroup + + // Simulate 10 concurrent operations on the same object + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + manager.Lock(objectID) + defer manager.Unlock(objectID) + + // Simulate some work with a counter + // Without proper locking, this would cause race conditions + current := atomic.LoadInt32(&counter) + time.Sleep(time.Millisecond) // Simulate some processing + atomic.StoreInt32(&counter, current+1) + }() + } + + wg.Wait() + + // All 
10 operations should have completed + assert.Equal(t, int32(10), atomic.LoadInt32(&counter), "All operations should complete") +} + +func TestObjectMutexManager_DifferentObjects(t *testing.T) { + manager := &objectMutexManager{ + mutexes: make(map[string]*sync.Mutex), + } + + objectID1 := "/clusters/cluster-1" + objectID2 := "/clusters/cluster-2" + + var wg sync.WaitGroup + results := make([]string, 0, 2) + var resultsMutex sync.Mutex + + // Operations on different objects should run concurrently + wg.Add(2) + + go func() { + defer wg.Done() + manager.Lock(objectID1) + defer manager.Unlock(objectID1) + + time.Sleep(50 * time.Millisecond) + resultsMutex.Lock() + results = append(results, "object1") + resultsMutex.Unlock() + }() + + go func() { + defer wg.Done() + manager.Lock(objectID2) + defer manager.Unlock(objectID2) + + time.Sleep(50 * time.Millisecond) + resultsMutex.Lock() + results = append(results, "object2") + resultsMutex.Unlock() + }() + + wg.Wait() + + // Both operations should complete (order doesn't matter) + assert.Len(t, results, 2, "Both operations should complete") +} + +func TestObjectMutexManager_SerializeSameObject(t *testing.T) { + manager := &objectMutexManager{ + mutexes: make(map[string]*sync.Mutex), + } + + objectID := "/jobs/job-123" + var executionOrder []int + var orderMutex sync.Mutex + var wg sync.WaitGroup + + // Launch 3 goroutines that should execute serially + for i := 1; i <= 3; i++ { + wg.Add(1) + go func(num int) { + defer wg.Done() + + manager.Lock(objectID) + defer manager.Unlock(objectID) + + // Record execution order + orderMutex.Lock() + executionOrder = append(executionOrder, num) + orderMutex.Unlock() + + time.Sleep(10 * time.Millisecond) // Simulate work + }(i) + } + + wg.Wait() + + // All 3 operations should complete + assert.Len(t, executionOrder, 3, "All operations should complete") + + // Operations should be serialized (no concurrent execution) + // We can't predict the exact order, but we can verify no race 
conditions occurred +} + +func TestGlobalObjectMutexManager(t *testing.T) { + // Test the global singleton + objectID := "/notebooks/notebook-456" + + lockObject(objectID) + + // Verify we can unlock without error + unlockObject(objectID) + + // Should be able to lock again after unlocking + lockObject(objectID) + unlockObject(objectID) +} + +func TestObjectMutexManager_UnlockNonexistent(t *testing.T) { + manager := &objectMutexManager{ + mutexes: make(map[string]*sync.Mutex), + } + + // Unlocking a non-existent object should not panic + assert.NotPanics(t, func() { + manager.Unlock("/clusters/does-not-exist") + }, "Unlocking non-existent object should not panic") +} diff --git a/internal/providers/pluginfw/products/permissions/resource_permission.go b/internal/providers/pluginfw/products/permissions/resource_permission.go index 1a38973e3a..7de8a9bd2f 100644 --- a/internal/providers/pluginfw/products/permissions/resource_permission.go +++ b/internal/providers/pluginfw/products/permissions/resource_permission.go @@ -200,6 +200,10 @@ func (r *PermissionResource) Create(ctx context.Context, req resource.CreateRequ return } + // Lock the object to prevent concurrent modifications + lockObject(objectID) + defer unlockObject(objectID) + // Determine principal identifier var principal string if !userName.IsNull() && !userName.IsUnknown() && userName.ValueString() != "" { @@ -349,6 +353,10 @@ func (r *PermissionResource) Update(ctx context.Context, req resource.UpdateRequ return } + // Lock the object to prevent concurrent modifications + lockObject(objectID) + defer unlockObject(objectID) + // Get the mapping from the ID mapping, err := permissions.GetResourcePermissionsFromId(objectID) if err != nil { @@ -411,6 +419,12 @@ func (r *PermissionResource) Delete(ctx context.Context, req resource.DeleteRequ return } + // Lock the object to prevent concurrent modifications + // This is CRITICAL for Delete to avoid race conditions when multiple + // permission resources for the 
same object are deleted concurrently + lockObject(objectID) + defer unlockObject(objectID) + // Get the mapping from the ID mapping, err := permissions.GetResourcePermissionsFromId(objectID) if err != nil { From e2edb579504d80f84f33c7f2b97cc353f9eacd17 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 29 Oct 2025 15:14:30 +0100 Subject: [PATCH 4/5] implement import, better handling of missing permissions --- .../permissions/resource_permission.go | 105 +++++++++++++++++- .../permissions/resource_permission_test.go | 62 +++++++++++ 2 files changed, 165 insertions(+), 2 deletions(-) diff --git a/internal/providers/pluginfw/products/permissions/resource_permission.go b/internal/providers/pluginfw/products/permissions/resource_permission.go index 7de8a9bd2f..d4a110c65b 100644 --- a/internal/providers/pluginfw/products/permissions/resource_permission.go +++ b/internal/providers/pluginfw/products/permissions/resource_permission.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/iam" "github.com/databricks/terraform-provider-databricks/common" pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" @@ -24,6 +25,7 @@ import ( const resourceName = "permission" var _ resource.ResourceWithConfigure = &PermissionResource{} +var _ resource.ResourceWithImportState = &PermissionResource{} func ResourcePermission() resource.Resource { return &PermissionResource{} @@ -244,7 +246,7 @@ func (r *PermissionResource) Create(ctx context.Context, req resource.CreateRequ // Set the ID, object_type, and all other fields in state resourceID := fmt.Sprintf("%s/%s", objectID, principal) resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), types.StringValue(resourceID))...) - resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("object_type"), types.StringValue(mapping.GetObjectType()))...) 
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("object_type"), types.StringValue(mapping.GetRequestObjectType()))...) resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(objectFieldName), objectFieldValue)...) // Set the object identifier field resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("user_name"), userName)...) resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("group_name"), groupName)...) @@ -289,6 +291,11 @@ func (r *PermissionResource) Read(ctx context.Context, req resource.ReadRequest, RequestObjectType: mapping.GetRequestObjectType(), }) if err != nil { + // If the object or permissions are not found, remove from state to trigger recreation + if apierr.IsMissing(err) { + resp.State.RemoveResource(ctx) + return + } resp.Diagnostics.AddError("Failed to read permissions", err.Error()) return } @@ -308,7 +315,7 @@ func (r *PermissionResource) Read(ctx context.Context, req resource.ReadRequest, } if !found { - // Permission no longer exists, remove from state + // Permission for this specific principal no longer exists, remove from state to trigger recreation resp.State.RemoveResource(ctx) return } @@ -441,6 +448,11 @@ func (r *PermissionResource) Delete(ctx context.Context, req resource.DeleteRequ RequestObjectType: mapping.GetRequestObjectType(), }) if err != nil { + // If the object or permissions are not found, the permission is already gone + // This is the desired state, so we can return successfully + if apierr.IsMissing(err) { + return + } resp.Diagnostics.AddError("Failed to read current permissions", err.Error()) return } @@ -468,11 +480,100 @@ func (r *PermissionResource) Delete(ctx context.Context, req resource.DeleteRequ AccessControlList: remainingACLs, }) if err != nil { + // If the object or principal doesn't exist, the permission is already gone + // This can happen if the underlying object or principal was deleted outside of Terraform + if apierr.IsMissing(err) { + return + } 
resp.Diagnostics.AddError("Failed to delete permission", err.Error()) return } } +func (r *PermissionResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Import ID format: /// + // Example: /clusters/cluster-123/user@example.com + + w, err := r.Client.WorkspaceClient() + if err != nil { + resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) + return + } + + // Parse the import ID + objectID, principal, err := r.parseID(req.ID) + if err != nil { + resp.Diagnostics.AddError( + "Invalid Import ID Format", + fmt.Sprintf("Expected format: ///. Error: %s", err.Error()), + ) + return + } + + // Get the mapping from the ID to determine object type and field + mapping, err := permissions.GetResourcePermissionsFromId(objectID) + if err != nil { + resp.Diagnostics.AddError("Failed to get resource permissions mapping", err.Error()) + return + } + + // Read current permissions from Databricks + idParts := strings.Split(objectID, "/") + permID := idParts[len(idParts)-1] + + perms, err := w.Permissions.Get(ctx, iam.GetPermissionRequest{ + RequestObjectId: permID, + RequestObjectType: mapping.GetRequestObjectType(), + }) + if err != nil { + resp.Diagnostics.AddError("Failed to read permissions", err.Error()) + return + } + + // Find the specific principal's permission + var found bool + var permissionLevel string + var userName, groupName, servicePrincipalName string + + for _, acl := range perms.AccessControlList { + if r.matchesPrincipal(acl, principal) { + if len(acl.AllPermissions) > 0 { + permissionLevel = string(acl.AllPermissions[0].PermissionLevel) + userName = acl.UserName + groupName = acl.GroupName + servicePrincipalName = acl.ServicePrincipalName + found = true + break + } + } + } + + if !found { + resp.Diagnostics.AddError( + "Permission Not Found", + fmt.Sprintf("No permission found for principal %q on object %q", principal, objectID), + ) + return + } + + // Set all attributes in 
state using SetAttribute + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), types.StringValue(req.ID))...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("object_type"), types.StringValue(mapping.GetRequestObjectType()))...) + + // Set the object identifier field (e.g., cluster_id, job_id, etc.) + // Extract the configured value from the objectID + configuredValue := permID + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(mapping.GetField()), types.StringValue(configuredValue))...) + + // Set principal fields + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("user_name"), types.StringValue(userName))...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("group_name"), types.StringValue(groupName))...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("service_principal_name"), types.StringValue(servicePrincipalName))...) + + // Set permission level + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("permission_level"), types.StringValue(permissionLevel))...) 
+} + // Helper methods // AttributeGetter is an interface for types that can get attributes (Plan, Config, State) diff --git a/internal/providers/pluginfw/products/permissions/resource_permission_test.go b/internal/providers/pluginfw/products/permissions/resource_permission_test.go index dccb1c691f..fdeb1ea2a1 100644 --- a/internal/providers/pluginfw/products/permissions/resource_permission_test.go +++ b/internal/providers/pluginfw/products/permissions/resource_permission_test.go @@ -138,3 +138,65 @@ func TestPermissionResource_Configure(t *testing.T) { assert.False(t, resp.Diagnostics.HasError()) assert.Equal(t, client, r.Client) } + +func TestPermissionResource_ImportState(t *testing.T) { + // Verify the resource implements the ImportState interface + var _ resource.ResourceWithImportState = &PermissionResource{} + + // Test that parseID correctly handles import ID format + r := &PermissionResource{} + + tests := []struct { + name string + importID string + expectedObjID string + expectedPrinc string + expectError bool + }{ + { + name: "valid cluster import", + importID: "/clusters/cluster-123/user@example.com", + expectedObjID: "/clusters/cluster-123", + expectedPrinc: "user@example.com", + expectError: false, + }, + { + name: "valid job import", + importID: "/jobs/job-456/data-engineers", + expectedObjID: "/jobs/job-456", + expectedPrinc: "data-engineers", + expectError: false, + }, + { + name: "valid authorization import", + importID: "/authorization/tokens/team-a", + expectedObjID: "/authorization/tokens", + expectedPrinc: "team-a", + expectError: false, + }, + { + name: "invalid format - too few parts", + importID: "/clusters/cluster-123", + expectError: true, + }, + { + name: "invalid format - no slashes", + importID: "cluster-123-user", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objectID, principal, err := r.parseID(tt.importID) + + if tt.expectError { + assert.Error(t, err, "Expected error for 
invalid import ID") + } else { + assert.NoError(t, err, "Expected no error for valid import ID") + assert.Equal(t, tt.expectedObjID, objectID, "Object ID should match") + assert.Equal(t, tt.expectedPrinc, principal, "Principal should match") + } + }) + } +} From ac8512437e45348331af3eba1dea3f520b26fe36 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 29 Oct 2025 15:56:33 +0100 Subject: [PATCH 5/5] more fixes for tests --- .../permissions/resource_permission.go | 22 +++++++++++++++---- permissions/resource_permission_acc_test.go | 12 +++++----- 2 files changed, 24 insertions(+), 10 deletions(-) diff --git a/internal/providers/pluginfw/products/permissions/resource_permission.go b/internal/providers/pluginfw/products/permissions/resource_permission.go index d4a110c65b..0e7e9fd47c 100644 --- a/internal/providers/pluginfw/products/permissions/resource_permission.go +++ b/internal/providers/pluginfw/products/permissions/resource_permission.go @@ -565,10 +565,24 @@ func (r *PermissionResource) ImportState(ctx context.Context, req resource.Impor configuredValue := permID resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(mapping.GetField()), types.StringValue(configuredValue))...) - // Set principal fields - resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("user_name"), types.StringValue(userName))...) - resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("group_name"), types.StringValue(groupName))...) - resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("service_principal_name"), types.StringValue(servicePrincipalName))...) + // Set principal fields - use null for empty strings to avoid ImportStateVerify failures + if userName != "" { + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("user_name"), types.StringValue(userName))...) + } else { + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("user_name"), types.StringNull())...) 
+ } + + if groupName != "" { + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("group_name"), types.StringValue(groupName))...) + } else { + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("group_name"), types.StringNull())...) + } + + if servicePrincipalName != "" { + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("service_principal_name"), types.StringValue(servicePrincipalName))...) + } else { + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("service_principal_name"), types.StringNull())...) + } // Set permission level resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("permission_level"), types.StringValue(permissionLevel))...) diff --git a/permissions/resource_permission_acc_test.go b/permissions/resource_permission_acc_test.go index aa321f225c..1c0158e999 100644 --- a/permissions/resource_permission_acc_test.go +++ b/permissions/resource_permission_acc_test.go @@ -175,15 +175,15 @@ func TestAccPermission_Authorization_Tokens(t *testing.T) { acceptance.LoadDebugEnvIfRunsFromIDE(t, "workspace") template := ` resource "databricks_group" "team_a" { - display_name = "permission-team-a-{var.RANDOM}" + display_name = "permission-team-a-{var.STICKY_RANDOM}" } resource "databricks_group" "team_b" { - display_name = "permission-team-b-{var.RANDOM}" + display_name = "permission-team-b-{var.STICKY_RANDOM}" } resource "databricks_group" "team_c" { - display_name = "permission-team-c-{var.RANDOM}" + display_name = "permission-team-c-{var.STICKY_RANDOM}" } # This demonstrates the key benefit: each team's token permissions @@ -245,15 +245,15 @@ resource "databricks_permission" "tokens_team_c" { // Update one permission independently - remove team_b Template: ` resource "databricks_group" "team_a" { - display_name = "permission-team-a-{var.RANDOM}" + display_name = "permission-team-a-{var.STICKY_RANDOM}" } resource "databricks_group" "team_b" { - display_name = "permission-team-b-{var.RANDOM}" + 
display_name = "permission-team-b-{var.STICKY_RANDOM}" } resource "databricks_group" "team_c" { - display_name = "permission-team-c-{var.RANDOM}" + display_name = "permission-team-c-{var.STICKY_RANDOM}" } resource "databricks_permission" "tokens_team_a" {