Commit 86f53be

Merge pull request #101 from databrickslabs/group-role-member-feature
Group role member feature
2 parents 8998fb4 + 54d5011

108 files changed: 15,481 additions, 1,846 deletions

databricks/provider.go (21 additions, 17 deletions)

@@ -26,23 +26,27 @@ func Provider(version string) terraform.ResourceProvider {
             "databricks_zones": dataSourceClusterZones(),
         },
         ResourcesMap: map[string]*schema.Resource{
-            "databricks_token":                 resourceToken(),
-            "databricks_secret_scope":          resourceSecretScope(),
-            "databricks_secret":                resourceSecret(),
-            "databricks_secret_acl":            resourceSecretACL(),
-            "databricks_instance_pool":         resourceInstancePool(),
-            "databricks_scim_user":             resourceScimUser(),
-            "databricks_scim_group":            resourceScimGroup(),
-            "databricks_notebook":              resourceNotebook(),
-            "databricks_cluster":               resourceCluster(),
-            "databricks_job":                   resourceJob(),
-            "databricks_dbfs_file":             resourceDBFSFile(),
-            "databricks_dbfs_file_sync":        resourceDBFSFileSync(),
-            "databricks_instance_profile":      resourceInstanceProfile(),
-            "databricks_aws_s3_mount":          resourceAWSS3Mount(),
-            "databricks_azure_blob_mount":      resourceAzureBlobMount(),
-            "databricks_azure_adls_gen1_mount": resourceAzureAdlsGen1Mount(),
-            "databricks_azure_adls_gen2_mount": resourceAzureAdlsGen2Mount(),
+            "databricks_token":                  resourceToken(),
+            "databricks_secret_scope":           resourceSecretScope(),
+            "databricks_secret":                 resourceSecret(),
+            "databricks_secret_acl":             resourceSecretACL(),
+            "databricks_instance_pool":          resourceInstancePool(),
+            "databricks_scim_user":              resourceScimUser(),
+            "databricks_scim_group":             resourceScimGroup(),
+            // Scim Group is split into multiple components for flexibility to pick and choose
+            "databricks_group":                  resourceGroup(),
+            "databricks_group_instance_profile": resourceGroupInstanceProfile(),
+            "databricks_group_member":           resourceGroupMember(),
+            "databricks_notebook":               resourceNotebook(),
+            "databricks_cluster":                resourceCluster(),
+            "databricks_job":                    resourceJob(),
+            "databricks_dbfs_file":              resourceDBFSFile(),
+            "databricks_dbfs_file_sync":         resourceDBFSFileSync(),
+            "databricks_instance_profile":       resourceInstanceProfile(),
+            "databricks_aws_s3_mount":           resourceAWSS3Mount(),
+            "databricks_azure_blob_mount":       resourceAzureBlobMount(),
+            "databricks_azure_adls_gen1_mount":  resourceAzureAdlsGen1Mount(),
+            "databricks_azure_adls_gen2_mount":  resourceAzureAdlsGen2Mount(),
             // MWS (multiple workspaces) resources are only limited to AWS as azure already has a built in concept of MWS
             "databricks_mws_credentials":             resourceMWSCredentials(),
             "databricks_mws_storage_configurations":  resourceMWSStorageConfigurations(),
New file (150 additions, 0 deletions)

@@ -0,0 +1,150 @@
package databricks

import (
    "log"

    "github.com/databrickslabs/databricks-terraform/client/model"
    "github.com/databrickslabs/databricks-terraform/client/service"
    "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func resourceGroup() *schema.Resource {
    return &schema.Resource{
        Create: resourceGroupCreate,
        Update: resourceGroupUpdate,
        Read:   resourceGroupRead,
        Delete: resourceGroupDelete,

        Schema: map[string]*schema.Schema{
            "display_name": {
                Type:     schema.TypeString,
                ForceNew: true,
                Required: true,
            },
            "allow_cluster_create": {
                Type:     schema.TypeBool,
                Optional: true,
            },
            "allow_instance_pool_create": {
                Type:     schema.TypeBool,
                Optional: true,
            },
        },
        Importer: &schema.ResourceImporter{
            State: schema.ImportStatePassthrough,
        },
    }
}

func resourceGroupCreate(d *schema.ResourceData, m interface{}) error {
    client := m.(*service.DBApiClient)
    groupName := d.Get("display_name").(string)
    allowClusterCreate := d.Get("allow_cluster_create").(bool)
    allowInstancePoolCreate := d.Get("allow_instance_pool_create").(bool)

    // Translate the entitlement flags that are set to true into the
    // corresponding SCIM entitlements.
    var entitlementsList []string
    if allowClusterCreate {
        entitlementsList = append(entitlementsList, string(model.AllowClusterCreateEntitlement))
    }
    if allowInstancePoolCreate {
        entitlementsList = append(entitlementsList, string(model.AllowInstancePoolCreateEntitlement))
    }

    group, err := client.Groups().Create(groupName, nil, nil, entitlementsList)
    if err != nil {
        return err
    }
    d.SetId(group.ID)
    return resourceGroupRead(d, m)
}

func resourceGroupRead(d *schema.ResourceData, m interface{}) error {
    id := d.Id()
    client := m.(*service.DBApiClient)
    group, err := client.Groups().Read(id)
    if err != nil {
        if isScimGroupMissing(err.Error(), id) {
            log.Printf("Missing scim group with id: %s.", id)
            d.SetId("")
            return nil
        }
        return err
    }

    err = d.Set("display_name", group.DisplayName)
    if err != nil {
        return err
    }

    err = d.Set("allow_cluster_create", isGroupClusterCreateEntitled(&group))
    if err != nil {
        return err
    }

    err = d.Set("allow_instance_pool_create", isGroupInstancePoolCreateEntitled(&group))
    return err
}

func resourceGroupUpdate(d *schema.ResourceData, m interface{}) error {
    id := d.Id()
    client := m.(*service.DBApiClient)

    // Handle entitlements update.
    var entitlementsAddList []string
    var entitlementsRemoveList []string
    // If allow_cluster_create has changed
    if d.HasChange("allow_cluster_create") {
        allowClusterCreate := d.Get("allow_cluster_create").(bool)
        // When the new value is true, queue the entitlement for addition.
        if allowClusterCreate {
            entitlementsAddList = append(entitlementsAddList, string(model.AllowClusterCreateEntitlement))
        }
        // Any change also queues the entitlement for removal.
        entitlementsRemoveList = append(entitlementsRemoveList, string(model.AllowClusterCreateEntitlement))
    }
    // If allow_instance_pool_create has changed
    if d.HasChange("allow_instance_pool_create") {
        allowInstancePoolCreate := d.Get("allow_instance_pool_create").(bool)
        // When the new value is true, queue the entitlement for addition.
        if allowInstancePoolCreate {
            entitlementsAddList = append(entitlementsAddList, string(model.AllowInstancePoolCreateEntitlement))
        }
        // Any change also queues the entitlement for removal.
        entitlementsRemoveList = append(entitlementsRemoveList, string(model.AllowInstancePoolCreateEntitlement))
    }

    if entitlementsAddList != nil || entitlementsRemoveList != nil {
        err := client.Groups().Patch(id, entitlementsAddList, entitlementsRemoveList, model.GroupEntitlementsPath)
        if err != nil {
            return err
        }
    }

    return nil
}

func resourceGroupDelete(d *schema.ResourceData, m interface{}) error {
    id := d.Id()
    client := m.(*service.DBApiClient)
    err := client.Groups().Delete(id)
    return err
}

func isGroupClusterCreateEntitled(group *model.Group) bool {
    for _, entitlement := range group.Entitlements {
        if entitlement.Value == model.AllowClusterCreateEntitlement {
            return true
        }
    }
    return false
}

func isGroupInstancePoolCreateEntitled(group *model.Group) bool {
    for _, entitlement := range group.Entitlements {
        if entitlement.Value == model.AllowInstancePoolCreateEntitlement {
            return true
        }
    }
    return false
}
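
With this schema, the two entitlement flags are the only attributes that can change in place: display_name is marked ForceNew, so renaming a group destroys and recreates it, while toggling allow_cluster_create or allow_instance_pool_create flows through resourceGroupUpdate as an entitlements patch. A minimal HCL sketch, assuming the resource is consumed exactly as defined above:

# A group whose members may create clusters but not instance pools.
resource "databricks_group" "cluster_creators" {
  display_name               = "cluster-creators" # changing this forces recreation (ForceNew)
  allow_cluster_create       = true
  allow_instance_pool_create = false
}

Flipping either boolean in a later apply is handled by the entitlements patch in resourceGroupUpdate rather than by recreating the group.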
New file (163 additions, 0 deletions)

@@ -0,0 +1,163 @@
package databricks

import (
    "errors"
    "fmt"
    "testing"

    "github.com/databrickslabs/databricks-terraform/client/model"
    "github.com/databrickslabs/databricks-terraform/client/service"
    "github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
    "github.com/hashicorp/terraform-plugin-sdk/helper/resource"
    "github.com/hashicorp/terraform-plugin-sdk/terraform"
    "github.com/stretchr/testify/assert"
)

func TestAccAWSGroupResource(t *testing.T) {
    var Group model.Group
    // Generate a random name for each test run to avoid collisions from
    // multiple concurrent tests. The acctest package includes helpers such
    // as RandStringFromCharSet.
    // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest
    randomStr := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)
    displayName := fmt.Sprintf("tf group test %s", randomStr)
    newDisplayName := fmt.Sprintf("new tf group test %s", randomStr)
    resource.Test(t, resource.TestCase{
        Providers:    testAccProviders,
        CheckDestroy: testAWSGroupResourceDestroy,
        Steps: []resource.TestStep{
            {
                // Use a dynamic configuration with the random name from above.
                Config: testAWSDatabricksGroup(displayName),
                // Compose a basic test, checking both remote and local values.
                Check: resource.ComposeTestCheckFunc(
                    // Query the API to retrieve the group object.
                    testAWSGroupResourceExists("databricks_group.my_group", &Group, t),
                    // Verify remote values.
                    testAWSGroupValues(t, &Group, displayName),
                    // Verify local values.
                    resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", displayName),
                ),
                Destroy: false,
            },
            {
                // Renaming the group should produce a non-empty plan, since
                // display_name forces a new resource.
                Config:             testAWSDatabricksGroup(newDisplayName),
                PlanOnly:           true,
                ExpectNonEmptyPlan: true,
                Destroy:            false,
            },
            {
                ResourceName:      "databricks_group.my_group",
                ImportState:       true,
                ImportStateVerify: true,
            },
        },
    })
}

func TestAccAWSGroupResource_verify_entitlements(t *testing.T) {
    var Group model.Group
    // Generate a random name for each test run to avoid collisions from
    // multiple concurrent tests.
    randomStr := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)
    displayName := fmt.Sprintf("tf group test %s", randomStr)
    newDisplayName := fmt.Sprintf("new tf group test %s", randomStr)
    resource.Test(t, resource.TestCase{
        Providers:    testAccProviders,
        CheckDestroy: testAWSGroupResourceDestroy,
        Steps: []resource.TestStep{
            {
                // Create the group with both entitlement flags enabled.
                Config: testAWSDatabricksGroupEntitlements(displayName, "true", "true"),
                // Compose a basic test, checking both remote and local values.
                Check: resource.ComposeTestCheckFunc(
                    // Query the API to retrieve the group object.
                    testAWSGroupResourceExists("databricks_group.my_group", &Group, t),
                    // Verify remote values.
                    testAWSGroupValues(t, &Group, displayName),
                    // Verify local values.
                    resource.TestCheckResourceAttr("databricks_group.my_group", "allow_cluster_create", "true"),
                    resource.TestCheckResourceAttr("databricks_group.my_group", "allow_instance_pool_create", "true"),
                ),
                Destroy: false,
            },
            // Remove the entitlements and expect a non-empty plan.
            {
                Config:             testAWSDatabricksGroup(newDisplayName),
                PlanOnly:           true,
                ExpectNonEmptyPlan: true,
                Destroy:            false,
            },
        },
    })
}

func testAWSGroupResourceDestroy(s *terraform.State) error {
    client := testAccProvider.Meta().(*service.DBApiClient)
    for _, rs := range s.RootModule().Resources {
        if rs.Type != "databricks_group" {
            continue
        }
        _, err := client.Groups().Read(rs.Primary.ID)
        if err != nil {
            return nil
        }
        return errors.New("resource Group is not cleaned up")
    }
    return nil
}

func testAWSGroupValues(t *testing.T, group *model.Group, displayName string) resource.TestCheckFunc {
    return func(s *terraform.State) error {
        assert.True(t, group.DisplayName == displayName)
        return nil
    }
}

// testAWSGroupResourceExists queries the API and retrieves the matching group.
func testAWSGroupResourceExists(n string, group *model.Group, t *testing.T) resource.TestCheckFunc {
    return func(s *terraform.State) error {
        // Find the corresponding state object.
        rs, ok := s.RootModule().Resources[n]
        if !ok {
            return fmt.Errorf("Not found: %s", n)
        }

        // Retrieve the configured client from the test setup.
        conn := testAccProvider.Meta().(*service.DBApiClient)
        resp, err := conn.Groups().Read(rs.Primary.ID)
        if err != nil {
            return err
        }

        // If there was no error, assign the response to the group pointer.
        *group = resp
        return nil
    }
}

func testAWSDatabricksGroup(groupName string) string {
    return fmt.Sprintf(`
    resource "databricks_group" "my_group" {
        display_name = "%s"
    }
    `, groupName)
}

func testAWSDatabricksGroupEntitlements(groupName, allowClusterCreate, allowPoolCreate string) string {
    return fmt.Sprintf(`
    resource "databricks_group" "my_group" {
        display_name = "%s"
        allow_cluster_create = %s
        allow_instance_pool_create = %s
    }
    `, groupName, allowClusterCreate, allowPoolCreate)
}
