diff --git a/VERSION b/VERSION
index 337a6a8..e5a4a5e 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.0.8
\ No newline at end of file
+1.0.9
\ No newline at end of file
diff --git a/api/client.go b/api/client.go
index 56f0864..a1e0227 100644
--- a/api/client.go
+++ b/api/client.go
@@ -59,6 +59,8 @@ type Client interface {
ListDatabase(ctx context.Context, instanceID, filter string) (*v1pb.ListDatabasesResponse, error)
// UpdateDatabase patches the database.
UpdateDatabase(ctx context.Context, patch *v1pb.Database, updateMasks []string) (*v1pb.Database, error)
+ // BatchUpdateDatabases batch updates databases.
+ BatchUpdateDatabases(ctx context.Context, request *v1pb.BatchUpdateDatabasesRequest) (*v1pb.BatchUpdateDatabasesResponse, error)
// GetDatabaseCatalog gets the database catalog by the database full name.
GetDatabaseCatalog(ctx context.Context, databaseName string) (*v1pb.DatabaseCatalog, error)
// UpdateDatabaseCatalog patches the database catalog.
diff --git a/client/database.go b/client/database.go
index 00b29cf..e3e95a4 100644
--- a/client/database.go
+++ b/client/database.go
@@ -5,8 +5,10 @@ import (
"fmt"
"net/http"
"net/url"
+ "strings"
v1pb "github.com/bytebase/bytebase/proto/generated-go/v1"
+ "google.golang.org/protobuf/encoding/protojson"
)
// GetDatabase gets the database by the database full name.
@@ -64,6 +66,32 @@ func (c *client) UpdateDatabase(ctx context.Context, patch *v1pb.Database, updat
return &res, nil
}
+// BatchUpdateDatabases batch updates databases.
+func (c *client) BatchUpdateDatabases(ctx context.Context, request *v1pb.BatchUpdateDatabasesRequest) (*v1pb.BatchUpdateDatabasesResponse, error) {
+ requestURL := fmt.Sprintf("%s/%s/instances/-/databases:batchUpdate", c.url, c.version)
+ payload, err := protojson.Marshal(request)
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequestWithContext(ctx, "POST", requestURL, strings.NewReader(string(payload)))
+ if err != nil {
+ return nil, err
+ }
+
+ body, err := c.doRequest(req)
+ if err != nil {
+ return nil, err
+ }
+
+ var res v1pb.BatchUpdateDatabasesResponse
+ if err := ProtojsonUnmarshaler.Unmarshal(body, &res); err != nil {
+ return nil, err
+ }
+
+ return &res, nil
+}
+
// GetDatabaseCatalog gets the database catalog by the database full name.
func (c *client) GetDatabaseCatalog(ctx context.Context, databaseName string) (*v1pb.DatabaseCatalog, error) {
body, err := c.getResource(ctx, fmt.Sprintf("%s/catalog", databaseName))
diff --git a/docs/data-sources/database_catalog.md b/docs/data-sources/database_catalog.md
index fa4d185..28a29ed 100644
--- a/docs/data-sources/database_catalog.md
+++ b/docs/data-sources/database_catalog.md
@@ -22,7 +22,7 @@ The database catalog data source.
### Read-Only
- `id` (String) The ID of this resource.
-- `schemas` (List of Object) (see [below for nested schema](#nestedatt--schemas))
+- `schemas` (Set of Object) (see [below for nested schema](#nestedatt--schemas))
### Nested Schema for `schemas`
@@ -30,7 +30,7 @@ The database catalog data source.
Read-Only:
- `name` (String)
-- `tables` (List of Object) (see [below for nested schema](#nestedobjatt--schemas--tables))
+- `tables` (Set of Object) (see [below for nested schema](#nestedobjatt--schemas--tables))
### Nested Schema for `schemas.tables`
@@ -38,7 +38,7 @@ Read-Only:
Read-Only:
- `classification` (String)
-- `columns` (List of Object) (see [below for nested schema](#nestedobjatt--schemas--tables--columns))
+- `columns` (Set of Object) (see [below for nested schema](#nestedobjatt--schemas--tables--columns))
- `name` (String)
diff --git a/docs/data-sources/instance.md b/docs/data-sources/instance.md
index 89ae596..e3e515d 100644
--- a/docs/data-sources/instance.md
+++ b/docs/data-sources/instance.md
@@ -21,7 +21,7 @@ The instance data source.
### Read-Only
-- `data_sources` (List of Object) (see [below for nested schema](#nestedatt--data_sources))
+- `data_sources` (Set of Object) (see [below for nested schema](#nestedatt--data_sources))
- `engine` (String) The instance engine. Support MYSQL, POSTGRES, TIDB, SNOWFLAKE, CLICKHOUSE, MONGODB, SQLITE, REDIS, ORACLE, SPANNER, MSSQL, REDSHIFT, MARIADB, OCEANBASE.
- `engine_version` (String) The engine version.
- `environment` (String) The environment name for your instance in "environments/{resource id}" format.
diff --git a/docs/data-sources/policy.md b/docs/data-sources/policy.md
index f99bc66..bcd12cb 100644
--- a/docs/data-sources/policy.md
+++ b/docs/data-sources/policy.md
@@ -36,7 +36,7 @@ The policy data source.
Optional:
-- `exceptions` (Block List) (see [below for nested schema](#nestedblock--masking_exception_policy--exceptions))
+- `exceptions` (Block Set) (see [below for nested schema](#nestedblock--masking_exception_policy--exceptions))
### Nested Schema for `masking_exception_policy.exceptions`
diff --git a/docs/data-sources/policy_list.md b/docs/data-sources/policy_list.md
index 3b3db7c..c709036 100644
--- a/docs/data-sources/policy_list.md
+++ b/docs/data-sources/policy_list.md
@@ -40,7 +40,7 @@ Read-Only:
Read-Only:
-- `exceptions` (List of Object) (see [below for nested schema](#nestedobjatt--policies--masking_exception_policy--exceptions))
+- `exceptions` (Set of Object) (see [below for nested schema](#nestedobjatt--policies--masking_exception_policy--exceptions))
### Nested Schema for `policies.masking_exception_policy.exceptions`
diff --git a/docs/data-sources/project.md b/docs/data-sources/project.md
index fed2d1c..7a10ec2 100644
--- a/docs/data-sources/project.md
+++ b/docs/data-sources/project.md
@@ -24,7 +24,7 @@ The project data source.
- `allow_modify_statement` (Boolean) Allow modifying statement after issue is created.
- `auto_enable_backup` (Boolean) Whether to automatically enable backup.
- `auto_resolve_issue` (Boolean) Enable auto resolve issue.
-- `databases` (List of Object) The databases in the project. (see [below for nested schema](#nestedatt--databases))
+- `databases` (Set of Object) The databases in the project. (see [below for nested schema](#nestedatt--databases))
- `enforce_issue_title` (Boolean) Enforce issue title created by user instead of generated by Bytebase.
- `id` (String) The ID of this resource.
- `key` (String) The project key.
diff --git a/docs/data-sources/project_list.md b/docs/data-sources/project_list.md
index 078d399..b822b2d 100644
--- a/docs/data-sources/project_list.md
+++ b/docs/data-sources/project_list.md
@@ -32,7 +32,7 @@ Read-Only:
- `allow_modify_statement` (Boolean)
- `auto_enable_backup` (Boolean)
- `auto_resolve_issue` (Boolean)
-- `databases` (List of Object) (see [below for nested schema](#nestedobjatt--projects--databases))
+- `databases` (Set of Object) (see [below for nested schema](#nestedobjatt--projects--databases))
- `enforce_issue_title` (Boolean)
- `key` (String)
- `members` (Set of Object) (see [below for nested schema](#nestedobjatt--projects--members))
diff --git a/docs/data-sources/setting.md b/docs/data-sources/setting.md
index 8c7447a..a03bc80 100644
--- a/docs/data-sources/setting.md
+++ b/docs/data-sources/setting.md
@@ -36,9 +36,9 @@ The setting data source.
Optional:
- `classification_from_config` (Boolean) If true, we will only store the classification in the config. Otherwise we will get the classification from table/column comment, and write back to the schema metadata.
-- `classifications` (Block List) (see [below for nested schema](#nestedblock--classification--classifications))
+- `classifications` (Block Set) (see [below for nested schema](#nestedblock--classification--classifications))
- `id` (String) The classification unique uuid.
-- `levels` (Block List) (see [below for nested schema](#nestedblock--classification--levels))
+- `levels` (Block Set) (see [below for nested schema](#nestedblock--classification--levels))
- `title` (String) The classification title. Optional.
@@ -126,7 +126,7 @@ Read-Only:
Read-Only:
-- `nodes` (List of Object) (see [below for nested schema](#nestedatt--external_approval_nodes--nodes))
+- `nodes` (Set of Object) (see [below for nested schema](#nestedatt--external_approval_nodes--nodes))
### Nested Schema for `external_approval_nodes.nodes`
diff --git a/docs/resources/database_catalog.md b/docs/resources/database_catalog.md
index 16f8ead..910935f 100644
--- a/docs/resources/database_catalog.md
+++ b/docs/resources/database_catalog.md
@@ -18,7 +18,7 @@ The database catalog resource.
### Required
- `database` (String) The database full name in instances/{instance}/databases/{database} format
-- `schemas` (Block List, Min: 1) (see [below for nested schema](#nestedblock--schemas))
+- `schemas` (Block Set, Min: 1) (see [below for nested schema](#nestedblock--schemas))
### Read-Only
@@ -29,7 +29,7 @@ The database catalog resource.
Required:
-- `tables` (Block List, Min: 1) (see [below for nested schema](#nestedblock--schemas--tables))
+- `tables` (Block Set, Min: 1) (see [below for nested schema](#nestedblock--schemas--tables))
Optional:
@@ -40,7 +40,7 @@ Optional:
Required:
-- `columns` (Block List, Min: 1) (see [below for nested schema](#nestedblock--schemas--tables--columns))
+- `columns` (Block Set, Min: 1) (see [below for nested schema](#nestedblock--schemas--tables--columns))
- `name` (String)
Optional:
diff --git a/docs/resources/instance.md b/docs/resources/instance.md
index a10b62b..c192bdd 100644
--- a/docs/resources/instance.md
+++ b/docs/resources/instance.md
@@ -17,7 +17,7 @@ The instance resource.
### Required
-- `data_sources` (Block List, Min: 1) The connection for the instance. You can configure read-only or admin connection account here. (see [below for nested schema](#nestedblock--data_sources))
+- `data_sources` (Block Set, Min: 1) The connection for the instance. You can configure read-only or admin connection account here. (see [below for nested schema](#nestedblock--data_sources))
- `engine` (String) The instance engine. Support MYSQL, POSTGRES, TIDB, SNOWFLAKE, CLICKHOUSE, MONGODB, SQLITE, REDIS, ORACLE, SPANNER, MSSQL, REDSHIFT, MARIADB, OCEANBASE.
- `environment` (String) The environment full name for the instance in environments/{environment id} format.
- `resource_id` (String) The instance unique resource id.
diff --git a/docs/resources/policy.md b/docs/resources/policy.md
index dc743a2..0d1f50d 100644
--- a/docs/resources/policy.md
+++ b/docs/resources/policy.md
@@ -36,7 +36,7 @@ The policy resource.
Optional:
-- `exceptions` (Block List) (see [below for nested schema](#nestedblock--masking_exception_policy--exceptions))
+- `exceptions` (Block Set) (see [below for nested schema](#nestedblock--masking_exception_policy--exceptions))
### Nested Schema for `masking_exception_policy.exceptions`
diff --git a/docs/resources/project.md b/docs/resources/project.md
index 31d0bae..3689bdb 100644
--- a/docs/resources/project.md
+++ b/docs/resources/project.md
@@ -26,7 +26,7 @@ The project resource.
- `allow_modify_statement` (Boolean) Allow modifying statement after issue is created.
- `auto_enable_backup` (Boolean) Whether to automatically enable backup.
- `auto_resolve_issue` (Boolean) Enable auto resolve issue.
-- `databases` (Block List) The databases in the project. (see [below for nested schema](#nestedblock--databases))
+- `databases` (Block Set) The databases in the project. (see [below for nested schema](#nestedblock--databases))
- `enforce_issue_title` (Boolean) Enforce issue title created by user instead of generated by Bytebase.
- `members` (Block Set) The members in the project. (see [below for nested schema](#nestedblock--members))
- `postgres_database_tenant_mode` (Boolean) Whether to enable the database tenant mode for PostgreSQL. If enabled, the issue will be created with the pre-appended "set role " statement.
diff --git a/docs/resources/setting.md b/docs/resources/setting.md
index 421ad28..96ce8bd 100644
--- a/docs/resources/setting.md
+++ b/docs/resources/setting.md
@@ -91,9 +91,9 @@ Optional:
Optional:
- `classification_from_config` (Boolean) If true, we will only store the classification in the config. Otherwise we will get the classification from table/column comment, and write back to the schema metadata.
-- `classifications` (Block List) (see [below for nested schema](#nestedblock--classification--classifications))
+- `classifications` (Block Set) (see [below for nested schema](#nestedblock--classification--classifications))
- `id` (String) The classification unique uuid.
-- `levels` (Block List) (see [below for nested schema](#nestedblock--classification--levels))
+- `levels` (Block Set) (see [below for nested schema](#nestedblock--classification--levels))
- `title` (String) The classification title. Optional.
@@ -123,7 +123,7 @@ Optional:
Required:
-- `nodes` (Block List, Min: 1) (see [below for nested schema](#nestedblock--external_approval_nodes--nodes))
+- `nodes` (Block Set, Min: 1) (see [below for nested schema](#nestedblock--external_approval_nodes--nodes))
### Nested Schema for `external_approval_nodes.nodes`
diff --git a/examples/environments/main.tf b/examples/environments/main.tf
index a22b79a..19fc9d0 100644
--- a/examples/environments/main.tf
+++ b/examples/environments/main.tf
@@ -2,7 +2,7 @@
terraform {
required_providers {
bytebase = {
- version = "1.0.8"
+ version = "1.0.9"
# For local development, please use "terraform.local/bytebase/bytebase" instead
source = "registry.terraform.io/bytebase/bytebase"
}
diff --git a/examples/groups/main.tf b/examples/groups/main.tf
index 9fa05f3..1577f8b 100644
--- a/examples/groups/main.tf
+++ b/examples/groups/main.tf
@@ -1,7 +1,7 @@
terraform {
required_providers {
bytebase = {
- version = "1.0.8"
+ version = "1.0.9"
# For local development, please use "terraform.local/bytebase/bytebase" instead
source = "registry.terraform.io/bytebase/bytebase"
}
diff --git a/examples/instances/main.tf b/examples/instances/main.tf
index ba73b15..f9fef86 100644
--- a/examples/instances/main.tf
+++ b/examples/instances/main.tf
@@ -2,7 +2,7 @@
terraform {
required_providers {
bytebase = {
- version = "1.0.8"
+ version = "1.0.9"
# For local development, please use "terraform.local/bytebase/bytebase" instead
source = "registry.terraform.io/bytebase/bytebase"
}
diff --git a/examples/policies/main.tf b/examples/policies/main.tf
index f561371..48fb027 100644
--- a/examples/policies/main.tf
+++ b/examples/policies/main.tf
@@ -1,7 +1,7 @@
terraform {
required_providers {
bytebase = {
- version = "1.0.8"
+ version = "1.0.9"
# For local development, please use "terraform.local/bytebase/bytebase" instead
source = "registry.terraform.io/bytebase/bytebase"
}
diff --git a/examples/projects/main.tf b/examples/projects/main.tf
index e76bbc5..a405d25 100644
--- a/examples/projects/main.tf
+++ b/examples/projects/main.tf
@@ -2,7 +2,7 @@
terraform {
required_providers {
bytebase = {
- version = "1.0.8"
+ version = "1.0.9"
# For local development, please use "terraform.local/bytebase/bytebase" instead
source = "registry.terraform.io/bytebase/bytebase"
}
diff --git a/examples/settings/main.tf b/examples/settings/main.tf
index dbf691c..51d14f1 100644
--- a/examples/settings/main.tf
+++ b/examples/settings/main.tf
@@ -1,7 +1,7 @@
terraform {
required_providers {
bytebase = {
- version = "1.0.8"
+ version = "1.0.9"
# For local development, please use "terraform.local/bytebase/bytebase" instead
source = "registry.terraform.io/bytebase/bytebase"
}
diff --git a/examples/setup/main.tf b/examples/setup/main.tf
index b5f8506..0b2636b 100644
--- a/examples/setup/main.tf
+++ b/examples/setup/main.tf
@@ -1,7 +1,7 @@
terraform {
required_providers {
bytebase = {
- version = "1.0.8"
+ version = "1.0.9"
# For local development, please use "terraform.local/bytebase/bytebase" instead
source = "registry.terraform.io/bytebase/bytebase"
}
diff --git a/examples/users/main.tf b/examples/users/main.tf
index 0972cc0..9407675 100644
--- a/examples/users/main.tf
+++ b/examples/users/main.tf
@@ -1,7 +1,7 @@
terraform {
required_providers {
bytebase = {
- version = "1.0.8"
+ version = "1.0.9"
# For local development, please use "terraform.local/bytebase/bytebase" instead
source = "registry.terraform.io/bytebase/bytebase"
}
diff --git a/examples/vcs/main.tf b/examples/vcs/main.tf
index 12d72e5..91f03a5 100644
--- a/examples/vcs/main.tf
+++ b/examples/vcs/main.tf
@@ -1,7 +1,7 @@
terraform {
required_providers {
bytebase = {
- version = "1.0.8"
+ version = "1.0.9"
# For local development, please use "terraform.local/bytebase/bytebase" instead
source = "registry.terraform.io/bytebase/bytebase"
}
diff --git a/provider/data_source_database_catalog.go b/provider/data_source_database_catalog.go
index e634e71..b43d1cb 100644
--- a/provider/data_source_database_catalog.go
+++ b/provider/data_source_database_catalog.go
@@ -1,6 +1,7 @@
package provider
import (
+ "bytes"
"context"
"fmt"
"regexp"
@@ -29,7 +30,7 @@ func dataSourceDatabaseCatalog() *schema.Resource {
},
"schemas": {
Computed: true,
- Type: schema.TypeList,
+ Type: schema.TypeSet,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
@@ -38,7 +39,7 @@ func dataSourceDatabaseCatalog() *schema.Resource {
},
"tables": {
Computed: true,
- Type: schema.TypeList,
+ Type: schema.TypeSet,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
@@ -52,7 +53,7 @@ func dataSourceDatabaseCatalog() *schema.Resource {
},
"columns": {
Computed: true,
- Type: schema.TypeList,
+ Type: schema.TypeSet,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
@@ -76,12 +77,19 @@ func dataSourceDatabaseCatalog() *schema.Resource {
},
},
},
+ Set: func(i interface{}) int {
+ return internal.ToHashcodeInt(columnHash(i))
+ },
},
},
},
+ Set: func(i interface{}) int {
+ return internal.ToHashcodeInt(tableHash(i))
+ },
},
},
},
+ Set: schemaHash,
},
},
}
@@ -108,11 +116,11 @@ func setDatabaseCatalog(d *schema.ResourceData, catalog *v1pb.DatabaseCatalog) d
}
schemaList := []interface{}{}
- for _, schema := range catalog.Schemas {
+ for _, schemaCatalog := range catalog.Schemas {
rawSchema := map[string]interface{}{}
tableList := []interface{}{}
- for _, table := range schema.Tables {
+ for _, table := range schemaCatalog.Tables {
rawTable := map[string]interface{}{}
rawTable["name"] = table.Name
rawTable["classification"] = table.Classification
@@ -126,15 +134,64 @@ func setDatabaseCatalog(d *schema.ResourceData, catalog *v1pb.DatabaseCatalog) d
rawColumn["labels"] = column.Labels
columnList = append(columnList, rawColumn)
}
- rawTable["columns"] = columnList
+ rawTable["columns"] = schema.NewSet(func(i interface{}) int {
+ return internal.ToHashcodeInt(columnHash(i))
+ }, columnList)
tableList = append(tableList, rawTable)
}
- rawSchema["tables"] = tableList
+ rawSchema["tables"] = schema.NewSet(func(i interface{}) int {
+ return internal.ToHashcodeInt(tableHash(i))
+ }, tableList)
schemaList = append(schemaList, rawSchema)
}
- if err := d.Set("schemas", schemaList); err != nil {
+ if err := d.Set("schemas", schema.NewSet(schemaHash, schemaList)); err != nil {
return diag.Errorf("cannot set schemas: %s", err.Error())
}
return nil
}
+
+func columnHash(rawColumn interface{}) string {
+ var buf bytes.Buffer
+ column := rawColumn.(map[string]interface{})
+
+ if v, ok := column["name"].(string); ok {
+ _, _ = buf.WriteString(fmt.Sprintf("%s-", v))
+ }
+
+ return buf.String()
+}
+
+func tableHash(rawTable interface{}) string {
+ var buf bytes.Buffer
+ table := rawTable.(map[string]interface{})
+
+ if v, ok := table["name"].(string); ok {
+ _, _ = buf.WriteString(fmt.Sprintf("%s-", v))
+ }
+ if columns, ok := table["columns"].(*schema.Set); ok {
+ for _, column := range columns.List() {
+ rawColumn := column.(map[string]interface{})
+ _, _ = buf.WriteString(columnHash(rawColumn))
+ }
+ }
+
+ return buf.String()
+}
+
+func schemaHash(rawSchema interface{}) int {
+ var buf bytes.Buffer
+ raw := rawSchema.(map[string]interface{})
+
+ if v, ok := raw["name"].(string); ok {
+ _, _ = buf.WriteString(fmt.Sprintf("%s-", v))
+ }
+ if tables, ok := raw["tables"].(*schema.Set); ok {
+ for _, table := range tables.List() {
+ rawTable := table.(map[string]interface{})
+ _, _ = buf.WriteString(tableHash(rawTable))
+ }
+ }
+
+ return internal.ToHashcodeInt(buf.String())
+}
diff --git a/provider/data_source_instance.go b/provider/data_source_instance.go
index bd14105..8bbed10 100644
--- a/provider/data_source_instance.go
+++ b/provider/data_source_instance.go
@@ -63,7 +63,7 @@ func dataSourceInstance() *schema.Resource {
Description: "The maximum number of connections. The default value is 10.",
},
"data_sources": {
- Type: schema.TypeList,
+ Type: schema.TypeSet,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
@@ -123,6 +123,7 @@ func dataSourceInstance() *schema.Resource {
},
},
},
+ Set: dataSourceHash,
},
},
}
diff --git a/provider/data_source_policy.go b/provider/data_source_policy.go
index 51e475f..a4a3325 100644
--- a/provider/data_source_policy.go
+++ b/provider/data_source_policy.go
@@ -1,6 +1,7 @@
package provider
import (
+ "bytes"
"context"
"fmt"
"regexp"
@@ -83,7 +84,7 @@ func getMaskingExceptionPolicySchema(computed bool) *schema.Schema {
Optional: true,
Default: nil,
MinItems: 0,
- Type: schema.TypeList,
+ Type: schema.TypeSet,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"database": {
@@ -134,6 +135,7 @@ func getMaskingExceptionPolicySchema(computed bool) *schema.Schema {
},
},
},
+ Set: exceptionHash,
},
},
},
@@ -243,7 +245,36 @@ func flattenMaskingExceptionPolicy(p *v1pb.MaskingExceptionPolicy) ([]interface{
exceptionList = append(exceptionList, raw)
}
policy := map[string]interface{}{
- "exceptions": exceptionList,
+ "exceptions": schema.NewSet(exceptionHash, exceptionList),
}
return []interface{}{policy}, nil
}
+
+func exceptionHash(rawException interface{}) int {
+ var buf bytes.Buffer
+ exception := rawException.(map[string]interface{})
+
+ if v, ok := exception["database"].(string); ok {
+ _, _ = buf.WriteString(fmt.Sprintf("%s-", v))
+ }
+ if v, ok := exception["schema"].(string); ok {
+ _, _ = buf.WriteString(fmt.Sprintf("%s-", v))
+ }
+ if v, ok := exception["table"].(string); ok {
+ _, _ = buf.WriteString(fmt.Sprintf("%s-", v))
+ }
+ if v, ok := exception["column"].(string); ok {
+ _, _ = buf.WriteString(fmt.Sprintf("%s-", v))
+ }
+ if v, ok := exception["member"].(string); ok {
+ _, _ = buf.WriteString(fmt.Sprintf("%s-", v))
+ }
+ if v, ok := exception["action"].(string); ok {
+ _, _ = buf.WriteString(fmt.Sprintf("%s-", v))
+ }
+ if v, ok := exception["expire_timestamp"].(string); ok {
+ _, _ = buf.WriteString(fmt.Sprintf("%s-", v))
+ }
+
+ return internal.ToHashcodeInt(buf.String())
+}
diff --git a/provider/data_source_project.go b/provider/data_source_project.go
index 157a7be..db884e9 100644
--- a/provider/data_source_project.go
+++ b/provider/data_source_project.go
@@ -87,7 +87,7 @@ func dataSourceProject() *schema.Resource {
func getProjectDatabasesSchema(computed bool) *schema.Schema {
return &schema.Schema{
- Type: schema.TypeList,
+ Type: schema.TypeSet,
Computed: computed,
Optional: !computed,
Description: "The databases in the project.",
@@ -128,6 +128,7 @@ func getProjectDatabasesSchema(computed bool) *schema.Schema {
},
},
},
+ Set: databaseHash,
}
}
@@ -352,7 +353,8 @@ func setProject(
return diag.Errorf("cannot set postgres_database_tenant_mode for project: %s", err.Error())
}
- if err := d.Set("databases", flattenDatabaseList(listDBResponse.Databases)); err != nil {
+ databaseList := flattenDatabaseList(listDBResponse.Databases)
+ if err := d.Set("databases", schema.NewSet(databaseHash, databaseList)); err != nil {
return diag.Errorf("cannot set databases for project: %s", err.Error())
}
@@ -367,6 +369,16 @@ func setProject(
return nil
}
+func databaseHash(rawDatabase interface{}) int {
+ var buf bytes.Buffer
+ database := rawDatabase.(map[string]interface{})
+
+ if v, ok := database["name"].(string); ok {
+ _, _ = buf.WriteString(fmt.Sprintf("%s-", v))
+ }
+ return internal.ToHashcodeInt(buf.String())
+}
+
func memberHash(rawMember interface{}) int {
var buf bytes.Buffer
member := rawMember.(map[string]interface{})
diff --git a/provider/data_source_setting.go b/provider/data_source_setting.go
index c5240c1..dbcd758 100644
--- a/provider/data_source_setting.go
+++ b/provider/data_source_setting.go
@@ -1,6 +1,7 @@
package provider
import (
+ "bytes"
"context"
"fmt"
@@ -72,7 +73,7 @@ func getClassificationSetting(computed bool) *schema.Schema {
"levels": {
Computed: computed,
Optional: true,
- Type: schema.TypeList,
+ Type: schema.TypeSet,
MinItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
@@ -96,11 +97,12 @@ func getClassificationSetting(computed bool) *schema.Schema {
},
},
},
+ Set: itemIDHash,
},
"classifications": {
Computed: computed,
Optional: true,
- Type: schema.TypeList,
+ Type: schema.TypeSet,
MinItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
@@ -130,6 +132,7 @@ func getClassificationSetting(computed bool) *schema.Schema {
},
},
},
+ Set: itemIDHash,
},
},
},
@@ -194,7 +197,7 @@ func getExternalApprovalSetting(computed bool) *schema.Schema {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"nodes": {
- Type: schema.TypeList,
+ Type: schema.TypeSet,
Computed: computed,
Required: !computed,
Elem: &schema.Resource{
@@ -219,6 +222,7 @@ func getExternalApprovalSetting(computed bool) *schema.Schema {
},
},
},
+ Set: itemIDHash,
},
},
},
@@ -544,7 +548,7 @@ func flattenClassificationSetting(setting *v1pb.DataClassificationSetting) []int
rawLevel["description"] = level.Description
rawLevels = append(rawLevels, rawLevel)
}
- raw["levels"] = rawLevels
+ raw["levels"] = schema.NewSet(itemIDHash, rawLevels)
rawClassifications := []interface{}{}
for _, classification := range config.GetClassification() {
@@ -555,8 +559,19 @@ func flattenClassificationSetting(setting *v1pb.DataClassificationSetting) []int
rawClassification["level"] = classification.LevelId
rawClassifications = append(rawClassifications, rawClassification)
}
- raw["classifications"] = rawClassifications
+ raw["classifications"] = schema.NewSet(itemIDHash, rawClassifications)
}
return []interface{}{raw}
}
+
+func itemIDHash(rawItem interface{}) int {
+ var buf bytes.Buffer
+ item := rawItem.(map[string]interface{})
+
+ if v, ok := item["id"].(string); ok {
+ _, _ = buf.WriteString(fmt.Sprintf("%s-", v))
+ }
+
+ return internal.ToHashcodeInt(buf.String())
+}
diff --git a/provider/internal/mock_client.go b/provider/internal/mock_client.go
index 7502f87..b54ceaf 100644
--- a/provider/internal/mock_client.go
+++ b/provider/internal/mock_client.go
@@ -412,6 +412,23 @@ func (c *mockClient) UpdateDatabase(ctx context.Context, patch *v1pb.Database, u
	return db, nil
}
+// BatchUpdateDatabases batch updates databases and returns the updated databases.
+func (c *mockClient) BatchUpdateDatabases(ctx context.Context, request *v1pb.BatchUpdateDatabasesRequest) (*v1pb.BatchUpdateDatabasesResponse, error) {
+	databases := make([]*v1pb.Database, 0, len(request.Requests))
+	for _, req := range request.Requests {
+		db, err := c.GetDatabase(ctx, req.Database.Name)
+		if err != nil {
+			return nil, err
+		}
+		if slices.Contains(req.UpdateMask.Paths, "project") {
+			db.Project = req.Database.Project
+		}
+		c.databaseMap[db.Name] = db
+		databases = append(databases, db)
+	}
+	return &v1pb.BatchUpdateDatabasesResponse{Databases: databases}, nil
+}
+
// GetDatabaseCatalog gets the database catalog by the database full name.
func (c *mockClient) GetDatabaseCatalog(_ context.Context, databaseName string) (*v1pb.DatabaseCatalog, error) {
db, ok := c.databaseCatalogMap[databaseName]
diff --git a/provider/resource_database_catalog.go b/provider/resource_database_catalog.go
index 3d83369..fcca4fb 100644
--- a/provider/resource_database_catalog.go
+++ b/provider/resource_database_catalog.go
@@ -38,7 +38,7 @@ func resourceDatabaseCatalog() *schema.Resource {
},
"schemas": {
Required: true,
- Type: schema.TypeList,
+ Type: schema.TypeSet,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
@@ -48,7 +48,7 @@ func resourceDatabaseCatalog() *schema.Resource {
},
"tables": {
Required: true,
- Type: schema.TypeList,
+ Type: schema.TypeSet,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
@@ -64,7 +64,7 @@ func resourceDatabaseCatalog() *schema.Resource {
},
"columns": {
Required: true,
- Type: schema.TypeList,
+ Type: schema.TypeSet,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
@@ -88,12 +88,19 @@ func resourceDatabaseCatalog() *schema.Resource {
},
},
},
+ Set: func(i interface{}) int {
+ return internal.ToHashcodeInt(columnHash(i))
+ },
},
},
},
+ Set: func(i interface{}) int {
+ return internal.ToHashcodeInt(tableHash(i))
+ },
},
},
},
+ Set: schemaHash,
},
},
}
@@ -177,7 +184,7 @@ func convertToDatabaseCatalog(d *schema.ResourceData) (*v1pb.DatabaseCatalog, er
if !ok || database == "" {
return nil, errors.Errorf("invalid database")
}
- rawSchemaList, ok := d.Get("schemas").([]interface{})
+ rawSchemaList, ok := d.Get("schemas").(*schema.Set)
if !ok {
return nil, errors.Errorf("invalid schemas")
}
@@ -187,17 +194,17 @@ func convertToDatabaseCatalog(d *schema.ResourceData) (*v1pb.DatabaseCatalog, er
Schemas: []*v1pb.SchemaCatalog{},
}
- for _, schema := range rawSchemaList {
- rawSchema := schema.(map[string]interface{})
- schema := &v1pb.SchemaCatalog{
+ for _, raw := range rawSchemaList.List() {
+ rawSchema := raw.(map[string]interface{})
+ schemaCatalog := &v1pb.SchemaCatalog{
Name: rawSchema["name"].(string),
}
- rawTableList, ok := rawSchema["tables"].([]interface{})
+ rawTableList, ok := rawSchema["tables"].(*schema.Set)
if !ok {
return nil, errors.Errorf("invalid tables")
}
- for _, table := range rawTableList {
+ for _, table := range rawTableList.List() {
rawTable := table.(map[string]interface{})
table := &v1pb.TableCatalog{
Name: rawTable["name"].(string),
@@ -205,11 +212,11 @@ func convertToDatabaseCatalog(d *schema.ResourceData) (*v1pb.DatabaseCatalog, er
}
columnList := []*v1pb.ColumnCatalog{}
- rawColumnList, ok := rawTable["columns"].([]interface{})
+ rawColumnList, ok := rawTable["columns"].(*schema.Set)
if !ok {
return nil, errors.Errorf("invalid columns")
}
- for _, column := range rawColumnList {
+ for _, column := range rawColumnList.List() {
rawColumn := column.(map[string]interface{})
labels := map[string]string{}
for key, val := range rawColumn["labels"].(map[string]interface{}) {
@@ -231,10 +238,10 @@ func convertToDatabaseCatalog(d *schema.ResourceData) (*v1pb.DatabaseCatalog, er
},
}
- schema.Tables = append(schema.Tables, table)
+ schemaCatalog.Tables = append(schemaCatalog.Tables, table)
}
- catalog.Schemas = append(catalog.Schemas, schema)
+ catalog.Schemas = append(catalog.Schemas, schemaCatalog)
}
return catalog, nil
diff --git a/provider/resource_instance.go b/provider/resource_instance.go
index 31f8f72..484ae60 100644
--- a/provider/resource_instance.go
+++ b/provider/resource_instance.go
@@ -1,6 +1,7 @@
package provider
import (
+ "bytes"
"context"
"fmt"
"regexp"
@@ -103,7 +104,7 @@ func resourceInstance() *schema.Resource {
Description: "The maximum number of connections.",
},
"data_sources": {
- Type: schema.TypeList,
+ Type: schema.TypeSet,
Required: true,
MinItems: 1,
Description: "The connection for the instance. You can configure read-only or admin connection account here.",
@@ -177,6 +178,7 @@ func resourceInstance() *schema.Resource {
},
},
},
+ Set: dataSourceHash,
},
},
}
@@ -473,7 +475,7 @@ func setInstanceMessage(d *schema.ResourceData, instance *v1pb.Instance) diag.Di
if err != nil {
return diag.FromErr(err)
}
- if err := d.Set("data_sources", dataSources); err != nil {
+ if err := d.Set("data_sources", schema.NewSet(dataSourceHash, dataSources)); err != nil {
return diag.Errorf("cannot set data_sources for instance: %s", err.Error())
}
@@ -512,51 +514,67 @@ func flattenDataSourceList(d *schema.ResourceData, dataSourceList []*v1pb.DataSo
return res, nil
}
+func dataSourceHash(rawDataSource interface{}) int {
+ var buf bytes.Buffer
+ dataSource := rawDataSource.(map[string]interface{})
+
+ if v, ok := dataSource["id"].(string); ok {
+ _, _ = buf.WriteString(fmt.Sprintf("%s-", v))
+ }
+ if v, ok := dataSource["type"].(string); ok {
+ _, _ = buf.WriteString(fmt.Sprintf("%s-", v))
+ }
+ return internal.ToHashcodeInt(buf.String())
+}
+
func convertDataSourceCreateList(d *schema.ResourceData, validate bool) ([]*v1pb.DataSource, error) {
var dataSourceList []*v1pb.DataSource
- if rawList, ok := d.Get("data_sources").([]interface{}); ok {
- dataSourceTypeMap := map[v1pb.DataSourceType]bool{}
- for _, raw := range rawList {
- obj := raw.(map[string]interface{})
- dataSource := &v1pb.DataSource{
- Id: obj["id"].(string),
- Type: v1pb.DataSourceType(v1pb.DataSourceType_value[obj["type"].(string)]),
- }
- if dataSourceTypeMap[dataSource.Type] && dataSource.Type == v1pb.DataSourceType_ADMIN {
- return nil, errors.Errorf("duplicate data source type ADMIN")
- }
- dataSourceTypeMap[dataSource.Type] = true
+ dataSourceSet, ok := d.Get("data_sources").(*schema.Set)
+ if !ok {
+ return dataSourceList, nil
+ }
- if v, ok := obj["username"].(string); ok {
- dataSource.Username = v
- }
- if v, ok := obj["password"].(string); ok && v != "" {
- dataSource.Password = v
- }
- if v, ok := obj["ssl_ca"].(string); ok {
- dataSource.SslCa = v
- }
- if v, ok := obj["ssl_cert"].(string); ok {
- dataSource.SslCert = v
- }
- if v, ok := obj["ssl_key"].(string); ok {
- dataSource.SslKey = v
- }
- if v, ok := obj["host"].(string); ok {
- dataSource.Host = v
- }
- if v, ok := obj["port"].(string); ok {
- dataSource.Port = v
- }
- if v, ok := obj["database"].(string); ok {
- dataSource.Database = v
- }
- dataSourceList = append(dataSourceList, dataSource)
+ dataSourceTypeMap := map[v1pb.DataSourceType]bool{}
+ for _, raw := range dataSourceSet.List() {
+ obj := raw.(map[string]interface{})
+ dataSource := &v1pb.DataSource{
+ Id: obj["id"].(string),
+ Type: v1pb.DataSourceType(v1pb.DataSourceType_value[obj["type"].(string)]),
+ }
+ if dataSourceTypeMap[dataSource.Type] && dataSource.Type == v1pb.DataSourceType_ADMIN {
+ return nil, errors.Errorf("duplicate data source type ADMIN")
}
+ dataSourceTypeMap[dataSource.Type] = true
- if !dataSourceTypeMap[v1pb.DataSourceType_ADMIN] && validate {
- return nil, errors.Errorf("data source \"%v\" is required", v1pb.DataSourceType_ADMIN.String())
+ if v, ok := obj["username"].(string); ok {
+ dataSource.Username = v
+ }
+ if v, ok := obj["password"].(string); ok && v != "" {
+ dataSource.Password = v
+ }
+ if v, ok := obj["ssl_ca"].(string); ok {
+ dataSource.SslCa = v
+ }
+ if v, ok := obj["ssl_cert"].(string); ok {
+ dataSource.SslCert = v
}
+ if v, ok := obj["ssl_key"].(string); ok {
+ dataSource.SslKey = v
+ }
+ if v, ok := obj["host"].(string); ok {
+ dataSource.Host = v
+ }
+ if v, ok := obj["port"].(string); ok {
+ dataSource.Port = v
+ }
+ if v, ok := obj["database"].(string); ok {
+ dataSource.Database = v
+ }
+ dataSourceList = append(dataSourceList, dataSource)
+ }
+
+ if !dataSourceTypeMap[v1pb.DataSourceType_ADMIN] && validate {
+ return nil, errors.Errorf("data source \"%v\" is required", v1pb.DataSourceType_ADMIN.String())
}
return dataSourceList, nil
diff --git a/provider/resource_policy.go b/provider/resource_policy.go
index ccc211d..f174834 100644
--- a/provider/resource_policy.go
+++ b/provider/resource_policy.go
@@ -215,11 +215,14 @@ func convertToMaskingExceptionPolicy(d *schema.ResourceData) (*v1pb.MaskingExcep
}
raw := rawList[0].(map[string]interface{})
- exceptionList := raw["exceptions"].([]interface{})
+ exceptionList, ok := raw["exceptions"].(*schema.Set)
+ if !ok {
+ return nil, errors.Errorf("invalid exceptions")
+ }
policy := &v1pb.MaskingExceptionPolicy{}
- for _, exception := range exceptionList {
+ for _, exception := range exceptionList.List() {
rawException := exception.(map[string]interface{})
databaseFullName := rawException["database"].(string)
diff --git a/provider/resource_project.go b/provider/resource_project.go
index 21a75f3..2b9f82a 100644
--- a/provider/resource_project.go
+++ b/provider/resource_project.go
@@ -12,6 +12,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"github.com/pkg/errors"
"google.golang.org/genproto/googleapis/type/expr"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
"github.com/bytebase/terraform-provider-bytebase/api"
"github.com/bytebase/terraform-provider-bytebase/provider/internal"
@@ -375,12 +376,13 @@ func updateDatabasesInProject(ctx context.Context, d *schema.ResourceData, clien
existedDBMap[db.Name] = db
}
- rawList, ok := d.Get("databases").([]interface{})
+ rawSet, ok := d.Get("databases").(*schema.Set)
if !ok {
return nil
}
updatedDBMap := map[string]*v1pb.Database{}
- for _, raw := range rawList {
+ batchTransferDatabases := []*v1pb.UpdateDatabaseRequest{}
+ for _, raw := range rawSet.List() {
obj := raw.(map[string]interface{})
dbName := obj["name"].(string)
if _, _, err := internal.GetInstanceDatabaseID(dbName); err != nil {
@@ -397,20 +399,52 @@ func updateDatabasesInProject(ctx context.Context, d *schema.ResourceData, clien
Project: projectName,
Labels: labels,
}
- if _, err := client.UpdateDatabase(ctx, updatedDBMap[dbName], []string{"project", "label"}); err != nil {
- return diag.Errorf("failed to update database %s with error: %v", dbName, err)
+ if _, ok := existedDBMap[dbName]; !ok {
+ batchTransferDatabases = append(batchTransferDatabases, &v1pb.UpdateDatabaseRequest{
+ Database: updatedDBMap[dbName],
+ UpdateMask: &fieldmaskpb.FieldMask{
+ Paths: []string{"project"},
+ },
+ })
+ }
+ }
+
+ if len(batchTransferDatabases) > 0 {
+ if _, err := client.BatchUpdateDatabases(ctx, &v1pb.BatchUpdateDatabasesRequest{
+ Requests: batchTransferDatabases,
+ Parent: "instances/-",
+ }); err != nil {
+ return diag.Errorf("failed to assign databases to project %s with error: %v", projectName, err)
+ }
+ }
+
+ for _, database := range updatedDBMap {
+ if len(database.Labels) > 0 {
+ if _, err := client.UpdateDatabase(ctx, database, []string{"label"}); err != nil {
+ return diag.Errorf("failed to update database %s with error: %v", database.Name, err)
+ }
}
}
+ unassignDatabases := []*v1pb.UpdateDatabaseRequest{}
for _, db := range existedDBMap {
if _, ok := updatedDBMap[db.Name]; !ok {
// move db to default project
- if _, err := client.UpdateDatabase(ctx, &v1pb.Database{
- Name: db.Name,
- Project: projectName,
- }, []string{"project"}); err != nil {
- return diag.Errorf("failed to move database %s to project %s with error: %v", db.Name, defaultProj, err)
- }
+ db.Project = defaultProj
+ unassignDatabases = append(unassignDatabases, &v1pb.UpdateDatabaseRequest{
+ Database: db,
+ UpdateMask: &fieldmaskpb.FieldMask{
+ Paths: []string{"project"},
+ },
+ })
+ }
+ }
+ if len(unassignDatabases) > 0 {
+ if _, err := client.BatchUpdateDatabases(ctx, &v1pb.BatchUpdateDatabasesRequest{
+ Requests: unassignDatabases,
+ Parent: "instances/-",
+ }); err != nil {
+ return diag.Errorf("failed to move databases to default project with error: %v", err)
}
}
@@ -484,11 +518,11 @@ func updateMembersInProject(ctx context.Context, d *schema.ResourceData, client
})
}
- if !existProjectOwner {
- return diag.Errorf("require at least 1 member with roles/projectOwner role")
- }
-
if len(iamPolicy.Bindings) > 0 {
+ if !existProjectOwner {
+ return diag.Errorf("require at least 1 member with roles/projectOwner role")
+ }
+
if _, err := client.SetProjectIAMPolicy(ctx, projectName, &v1pb.SetIamPolicyRequest{
Policy: iamPolicy,
Etag: iamPolicy.Etag,
diff --git a/provider/resource_setting.go b/provider/resource_setting.go
index 4702a8b..87632cc 100644
--- a/provider/resource_setting.go
+++ b/provider/resource_setting.go
@@ -176,8 +176,11 @@ func convertToV1ClassificationSetting(d *schema.ResourceData) (*v1pb.DataClassif
return nil, errors.Errorf("id is required for classification config")
}
- rawLevels := raw["levels"].([]interface{})
- for _, level := range rawLevels {
+ rawLevels, ok := raw["levels"].(*schema.Set)
+ if !ok {
+ return nil, errors.Errorf("levels is required for classification config")
+ }
+ for _, level := range rawLevels.List() {
rawLevel := level.(map[string]interface{})
classificationLevel := &v1pb.DataClassificationSetting_DataClassificationConfig_Level{
Id: rawLevel["id"].(string),
@@ -193,8 +196,11 @@ func convertToV1ClassificationSetting(d *schema.ResourceData) (*v1pb.DataClassif
dataClassificationConfig.Levels = append(dataClassificationConfig.Levels, classificationLevel)
}
- rawClassificationss := raw["classifications"].([]interface{})
- for _, classification := range rawClassificationss {
+ rawClassificationss, ok := raw["classifications"].(*schema.Set)
+ if !ok {
+ return nil, errors.Errorf("classifications is required for classification config")
+ }
+ for _, classification := range rawClassificationss.List() {
rawClassification := classification.(map[string]interface{})
classificationData := &v1pb.DataClassificationSetting_DataClassificationConfig_DataClassification{
Id: rawClassification["id"].(string),
@@ -228,10 +234,14 @@ func convertToV1ExternalNodesSetting(d *schema.ResourceData) (*v1pb.ExternalAppr
}
raw := rawList[0].(map[string]interface{})
- nodes := raw["nodes"].([]interface{})
+ nodes, ok := raw["nodes"].(*schema.Set)
+ if !ok {
+ return nil, errors.Errorf("missing nodes")
+ }
+
externalApprovalSetting := &v1pb.ExternalApprovalSetting{}
- for _, node := range nodes {
+ for _, node := range nodes.List() {
rawNode := node.(map[string]interface{})
externalApprovalSetting.Nodes = append(externalApprovalSetting.Nodes, &v1pb.ExternalApprovalSetting_Node{
Id: rawNode["id"].(string),