Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 11 additions & 5 deletions catalog/resource_catalog.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,8 +28,13 @@ func ucDirectoryPathSlashAndEmptySuppressDiff(k, old, new string, d *schema.Reso
return false
}

// CatalogInfo wraps the SDK's catalog.CatalogInfo and embeds
// common.Namespace so the generated resource schema also carries the
// namespace (provider_config) fields used for unified-provider routing.
// NOTE(review): field promotion assumes no name collisions between the
// two embedded structs — confirm StructToSchema handles both.
type CatalogInfo struct {
catalog.CatalogInfo
common.Namespace
}

func ResourceCatalog() common.Resource {
catalogSchema := common.StructToSchema(catalog.CatalogInfo{},
catalogSchema := common.StructToSchema(CatalogInfo{},
func(s map[string]*schema.Schema) map[string]*schema.Schema {
s["force_destroy"] = &schema.Schema{
Type: schema.TypeBool,
Expand Down Expand Up @@ -58,12 +63,13 @@ func ResourceCatalog() common.Resource {
common.CustomizeSchemaPath(s, v).SetReadOnly()
}
common.CustomizeSchemaPath(s, "effective_predictive_optimization_flag").SetComputed().SetSuppressDiff()
common.NamespaceCustomizeSchemaMap(s)
return s
})
return common.Resource{
Schema: catalogSchema,
Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
w, err := c.WorkspaceClient()
w, err := c.WorkspaceClientUnifiedProvider(ctx, d)
if err != nil {
return err
}
Expand Down Expand Up @@ -107,7 +113,7 @@ func ResourceCatalog() common.Resource {
return bindings.AddCurrentWorkspaceBindings(ctx, d, w, ci.Name, bindings.BindingsSecurableTypeCatalog)
},
Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
w, err := c.WorkspaceClient()
w, err := c.WorkspaceClientUnifiedProvider(ctx, d)
if err != nil {
return err
}
Expand All @@ -119,7 +125,7 @@ func ResourceCatalog() common.Resource {
return common.StructToData(ci, catalogSchema, d)
},
Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
w, err := c.WorkspaceClient()
w, err := c.WorkspaceClientUnifiedProvider(ctx, d)
if err != nil {
return err
}
Expand Down Expand Up @@ -188,7 +194,7 @@ func ResourceCatalog() common.Resource {
return bindings.AddCurrentWorkspaceBindings(ctx, d, w, ci.Name, bindings.BindingsSecurableTypeCatalog)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This probably needs to be moved up to the top of Update(), otherwise the WorkspaceClient used when changing provider_config.workspace_id may not have access to the isolated catalog (unless that workspace also happens to have a workspace binding separately).

},
Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
w, err := c.WorkspaceClient()
w, err := c.WorkspaceClientUnifiedProvider(ctx, d)
if err != nil {
return err
}
Expand Down
1 change: 1 addition & 0 deletions clusters/data_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (

func DataSourceCluster() common.Resource {
return common.WorkspaceData(func(ctx context.Context, data *struct {
common.Namespace
Id string `json:"id,omitempty" tf:"computed"`
ClusterId string `json:"cluster_id,omitempty" tf:"computed"`
Name string `json:"cluster_name,omitempty" tf:"computed"`
Expand Down
1 change: 1 addition & 0 deletions clusters/data_clusters.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (

func DataSourceClusters() common.Resource {
return common.WorkspaceData(func(ctx context.Context, data *struct {
common.Namespace
Id string `json:"id,omitempty" tf:"computed"`
Ids []string `json:"ids,omitempty" tf:"computed,slice_set"`
ClusterNameContains string `json:"cluster_name_contains,omitempty"`
Expand Down
9 changes: 7 additions & 2 deletions clusters/data_spark_version.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,16 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// sparkVersionRequestWrapper augments the SDK's SparkVersionRequest with
// common.Namespace so the spark_version data source accepts the shared
// namespace (provider_config) attributes; the inner SparkVersionRequest
// is passed to Clusters.SelectSparkVersion unchanged.
type sparkVersionRequestWrapper struct {
common.Namespace
compute.SparkVersionRequest
}

// DataSourceSparkVersion returns DBR version matching to the specification
func DataSourceSparkVersion() common.Resource {
return common.WorkspaceDataWithCustomizeFunc(func(ctx context.Context, data *compute.SparkVersionRequest, w *databricks.WorkspaceClient) error {
return common.WorkspaceDataWithCustomizeFunc(func(ctx context.Context, data *sparkVersionRequestWrapper, w *databricks.WorkspaceClient) error {
data.Id = ""
version, err := w.Clusters.SelectSparkVersion(ctx, *data)
version, err := w.Clusters.SelectSparkVersion(ctx, data.SparkVersionRequest)
if err != nil {
return err
}
Expand Down
1 change: 1 addition & 0 deletions clusters/data_zones.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
// DataSourceClusterZones ...
func DataSourceClusterZones() common.Resource {
return common.WorkspaceData(func(ctx context.Context, data *struct {
common.Namespace
Id string `json:"id,omitempty" tf:"computed"`
DefaultZone string `json:"default_zone,omitempty" tf:"computed"`
Zones []string `json:"zones,omitempty" tf:"computed"`
Expand Down
13 changes: 9 additions & 4 deletions clusters/resource_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,9 @@ const (

func ResourceCluster() common.Resource {
return common.Resource{
CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff) error {
return common.NamespaceCustomizeDiff(d)
},
Create: resourceClusterCreate,
Read: resourceClusterRead,
Update: resourceClusterUpdate,
Expand Down Expand Up @@ -275,6 +278,7 @@ type LibraryWithAlias struct {

// ClusterSpec is the schema struct for the cluster resource: the SDK
// cluster spec plus the shared namespace (provider_config) fields and
// the library-with-alias extension.
type ClusterSpec struct {
compute.ClusterSpec
common.Namespace
LibraryWithAlias
}

Expand Down Expand Up @@ -323,6 +327,7 @@ func (ClusterSpec) CustomizeSchemaResourceSpecific(s *common.CustomizableSchema)
}

func (ClusterSpec) CustomizeSchema(s *common.CustomizableSchema) *common.CustomizableSchema {
common.NamespaceCustomizeSchema(s)
s.SchemaPath("enable_elastic_disk").SetComputed()
s.SchemaPath("enable_local_disk_encryption").SetComputed()
s.SchemaPath("node_type_id").SetComputed().SetConflictsWith([]string{"driver_instance_pool_id", "instance_pool_id"})
Expand Down Expand Up @@ -388,7 +393,7 @@ func resourceClusterSchema() map[string]*schema.Schema {
func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
start := time.Now()
timeout := d.Timeout(schema.TimeoutCreate)
w, err := c.WorkspaceClient()
w, err := c.WorkspaceClientUnifiedProvider(ctx, d)
if err != nil {
return err
}
Expand Down Expand Up @@ -480,7 +485,7 @@ func setPinnedStatus(ctx context.Context, d *schema.ResourceData, clusterAPI com
}

func resourceClusterRead(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
w, err := c.WorkspaceClient()
w, err := c.WorkspaceClientUnifiedProvider(ctx, d)
if err != nil {
return err
}
Expand Down Expand Up @@ -532,7 +537,7 @@ func hasClusterConfigChanged(d *schema.ResourceData) bool {
}

func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
w, err := c.WorkspaceClient()
w, err := c.WorkspaceClientUnifiedProvider(ctx, d)
if err != nil {
return err
}
Expand Down Expand Up @@ -697,7 +702,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, c *commo
}

func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
w, err := c.WorkspaceClient()
w, err := c.WorkspaceClientUnifiedProvider(ctx, d)
if err != nil {
return err
}
Expand Down
11 changes: 8 additions & 3 deletions clusters/resource_library.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ import (

// LibraryResource is the schema struct for the library resource: the SDK
// library definition plus the shared namespace (provider_config) fields.
type LibraryResource struct {
compute.Library
common.Namespace
}

func (LibraryResource) CustomizeSchemaResourceSpecific(s *common.CustomizableSchema) *common.CustomizableSchema {
Expand All @@ -28,6 +29,7 @@ func (LibraryResource) CustomizeSchemaResourceSpecific(s *common.CustomizableSch
const EggDeprecationWarning = "The `egg` library type is deprecated. Please use `whl` or `pypi` instead."

// CustomizeSchema applies the shared namespace schema customizations and
// marks the deprecated `egg` library type. Returns s to allow chaining.
func (LibraryResource) CustomizeSchema(s *common.CustomizableSchema) *common.CustomizableSchema {
common.NamespaceCustomizeSchema(s)
s.SchemaPath("egg").SetDeprecated(EggDeprecationWarning)
return s
}
Expand All @@ -43,8 +45,11 @@ func ResourceLibrary() common.Resource {
}
return common.Resource{
Schema: libraySdkSchema,
CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff) error {
return common.NamespaceCustomizeDiff(d)
},
Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
w, err := c.WorkspaceClient()
w, err := c.WorkspaceClientUnifiedProvider(ctx, d)
if err != nil {
return err
}
Expand Down Expand Up @@ -74,7 +79,7 @@ func ResourceLibrary() common.Resource {
},
Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
clusterID, libraryRep := parseId(d.Id())
w, err := c.WorkspaceClient()
w, err := c.WorkspaceClientUnifiedProvider(ctx, d)
if err != nil {
return err
}
Expand Down Expand Up @@ -106,7 +111,7 @@ func ResourceLibrary() common.Resource {
},
Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
clusterID, libraryRep := parseId(d.Id())
w, err := c.WorkspaceClient()
w, err := c.WorkspaceClientUnifiedProvider(ctx, d)
if err != nil {
return err
}
Expand Down
80 changes: 51 additions & 29 deletions common/resource.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading
Loading