From b3cd7d369a5edbbec3241ff04fb4ad46e87a8bb8 Mon Sep 17 00:00:00 2001
From: MyroslavLevchyk
Date: Thu, 24 Jul 2025 16:18:22 +0300
Subject: [PATCH 1/2] feat: added lakebase instance

---
 README.md    | 32 +++++++++++++++++---------------
 lakebase.tf  |  9 +++++++++
 variables.tf | 20 ++++++++++++++++++++
 versions.tf  |  2 +-
 4 files changed, 47 insertions(+), 16 deletions(-)
 create mode 100644 lakebase.tf

diff --git a/README.md b/README.md
index 85c9412..bb1f358 100644
--- a/README.md
+++ b/README.md
@@ -354,13 +354,13 @@ module "databricks_runtime" {

 | Name | Version |
 |------|---------|
 | [terraform](#requirement\_terraform) | >=1.3 |
-| [databricks](#requirement\_databricks) | ~>1.0 |
+| [databricks](#requirement\_databricks) | >=1.85.0 |

 ## Providers

 | Name | Version |
 |------|---------|
-| [databricks](#provider\_databricks) | ~>1.0 |
+| [databricks](#provider\_databricks) | >=1.85.0 |

 ## Modules
@@ -373,6 +373,7 @@ No modules.
 | [databricks_cluster.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/cluster) | resource |
 | [databricks_cluster_policy.overrides](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/cluster_policy) | resource |
 | [databricks_cluster_policy.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/cluster_policy) | resource |
+| [databricks_database_instance.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/database_instance) | resource |
 | [databricks_entitlements.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/entitlements) | resource |
 | [databricks_group.this](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/group) | resource |
 | [databricks_ip_access_list.allowed_list](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/ip_access_list) | resource |
@@ -398,22 +399,23 @@ No modules.

 | Name | Description | Type | Default | Required |
 |------|-------------|------|---------|:--------:|
 | [cloud\_name](#input\_cloud\_name) | Cloud Name | `string` | n/a | yes |
-| [clusters](#input\_clusters) | Set of objects with parameters to configure Databricks clusters and assign permissions to it for certain custom groups |
set(object({
cluster_name = string
spark_version = optional(string, "15.3.x-scala2.12")
spark_conf = optional(map(any), {})
spark_env_vars = optional(map(any), {})
data_security_mode = optional(string, "USER_ISOLATION")
aws_attributes = optional(object({
availability = optional(string)
zone_id = optional(string)
first_on_demand = optional(number)
spot_bid_price_percent = optional(number)
ebs_volume_count = optional(number)
ebs_volume_size = optional(number)
ebs_volume_type = optional(string)
}), {
availability = "ON_DEMAND"
zone_id = "auto"
first_on_demand = 0
spot_bid_price_percent = 100
ebs_volume_count = 1
ebs_volume_size = 100
ebs_volume_type = "GENERAL_PURPOSE_SSD"
})
azure_attributes = optional(object({
availability = optional(string)
first_on_demand = optional(number)
spot_bid_max_price = optional(number, 1)
}), {
availability = "ON_DEMAND_AZURE"
first_on_demand = 0
})
node_type_id = optional(string, null)
autotermination_minutes = optional(number, 20)
min_workers = optional(number, 1)
max_workers = optional(number, 2)
cluster_log_conf_destination = optional(string, null)
init_scripts_workspace = optional(set(string), [])
init_scripts_volumes = optional(set(string), [])
init_scripts_dbfs = optional(set(string), [])
init_scripts_abfss = optional(set(string), [])
single_user_name = optional(string, null)
single_node_enable = optional(bool, false)
custom_tags = optional(map(string), {})
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
pypi_library_repository = optional(set(string), [])
maven_library_repository = optional(set(object({
coordinates = string
exclusions = set(string)
})), [])
}))
| `[]` | no | -| [custom\_cluster\_policies](#input\_custom\_cluster\_policies) | Provides an ability to create custom cluster policy, assign it to cluster and grant CAN\_USE permissions on it to certain custom groups
name - name of custom cluster policy to create
can\_use - list of string, where values are custom group names, there groups have to be created with Terraform;
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
can_use = list(string)
definition = any
}))
|
[
{
"can_use": null,
"definition": null,
"name": null
}
]
| no | -| [custom\_config](#input\_custom\_config) | Map of AD databricks workspace custom config | `map(string)` |
{
"enable-X-Content-Type-Options": "true",
"enable-X-Frame-Options": "true",
"enable-X-XSS-Protection": "true",
"enableDbfsFileBrowser": "false",
"enableExportNotebook": "false",
"enableIpAccessLists": "true",
"enableNotebookTableClipboard": "false",
"enableResultsDownloading": "false",
"enableUploadDataUis": "false",
"enableVerboseAuditLogs": "true",
"enforceUserIsolation": "true",
"storeInteractiveNotebookResultsInCustomerAccount": "true"
}
| no | -| [default\_cluster\_policies\_override](#input\_default\_cluster\_policies\_override) | Provides an ability to override default cluster policy
name - name of cluster policy to override
family\_id - family id of corresponding policy
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
family_id = string
definition = any
}))
|
[
{
"definition": null,
"family_id": null,
"name": null
}
]
| no | -| [iam\_account\_groups](#input\_iam\_account\_groups) | List of objects with group name and entitlements for this group |
list(object({
group_name = optional(string)
entitlements = optional(list(string))
}))
| `[]` | no | -| [iam\_workspace\_groups](#input\_iam\_workspace\_groups) | Used to create workspace group. Map of group name and its parameters, such as users and service principals added to the group. Also possible to configure group entitlements. |
map(object({
user = optional(list(string))
service_principal = optional(list(string))
entitlements = optional(list(string))
}))
| `{}` | no | -| [ip\_addresses](#input\_ip\_addresses) | A map of IP address ranges | `map(string)` |
{
"all": "0.0.0.0/0"
}
| no | -| [key\_vault\_secret\_scope](#input\_key\_vault\_secret\_scope) | Object with Azure Key Vault parameters required for creation of Azure-backed Databricks Secret scope |
list(object({
name = string
key_vault_id = string
dns_name = string
tenant_id = string
}))
| `[]` | no | -| [mount\_configuration](#input\_mount\_configuration) | Configuration for mounting storage, including only service principal details |
object({
service_principal = object({
client_id = string
client_secret = string
tenant_id = string
})
})
|
{
"service_principal": {
"client_id": null,
"client_secret": null,
"tenant_id": null
}
}
| no |
+| [clusters](#input\_clusters) | Set of objects with parameters to configure Databricks clusters and assign permissions to them for certain custom groups |
set(object({
cluster_name = string
spark_version = optional(string, "15.3.x-scala2.12")
spark_conf = optional(map(any), {})
spark_env_vars = optional(map(any), {})
data_security_mode = optional(string, "USER_ISOLATION")
aws_attributes = optional(object({
availability = optional(string)
zone_id = optional(string)
first_on_demand = optional(number)
spot_bid_price_percent = optional(number)
ebs_volume_count = optional(number)
ebs_volume_size = optional(number)
ebs_volume_type = optional(string)
}), {
availability = "ON_DEMAND"
zone_id = "auto"
first_on_demand = 0
spot_bid_price_percent = 100
ebs_volume_count = 1
ebs_volume_size = 100
ebs_volume_type = "GENERAL_PURPOSE_SSD"
})
azure_attributes = optional(object({
availability = optional(string)
first_on_demand = optional(number)
spot_bid_max_price = optional(number, 1)
}), {
availability = "ON_DEMAND_AZURE"
first_on_demand = 0
})
node_type_id = optional(string, null)
autotermination_minutes = optional(number, 20)
min_workers = optional(number, 1)
max_workers = optional(number, 2)
cluster_log_conf_destination = optional(string, null)
init_scripts_workspace = optional(set(string), [])
init_scripts_volumes = optional(set(string), [])
init_scripts_dbfs = optional(set(string), [])
init_scripts_abfss = optional(set(string), [])
single_user_name = optional(string, null)
single_node_enable = optional(bool, false)
custom_tags = optional(map(string), {})
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
pypi_library_repository = optional(set(string), [])
maven_library_repository = optional(set(object({
coordinates = string
exclusions = set(string)
})), [])
}))
| `[]` | no |
+| [custom\_cluster\_policies](#input\_custom\_cluster\_policies) | Provides an ability to create a custom cluster policy, assign it to a cluster, and grant CAN\_USE permissions on it to certain custom groups
name - name of custom cluster policy to create
can\_use - list of strings, where values are custom group names; these groups have to be created with Terraform;
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
can_use = list(string)
definition = any
}))
|
[
{
"can_use": null,
"definition": null,
"name": null
}
]
| no | +| [custom\_config](#input\_custom\_config) | Map of AD databricks workspace custom config | `map(string)` |
{
"enable-X-Content-Type-Options": "true",
"enable-X-Frame-Options": "true",
"enable-X-XSS-Protection": "true",
"enableDbfsFileBrowser": "false",
"enableExportNotebook": "false",
"enableIpAccessLists": "true",
"enableNotebookTableClipboard": "false",
"enableResultsDownloading": "false",
"enableUploadDataUis": "false",
"enableVerboseAuditLogs": "true",
"enforceUserIsolation": "true",
"storeInteractiveNotebookResultsInCustomerAccount": "true"
}
| no | +| [default\_cluster\_policies\_override](#input\_default\_cluster\_policies\_override) | Provides an ability to override default cluster policy
name - name of cluster policy to override
family\_id - family id of corresponding policy
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
family_id = string
definition = any
}))
|
[
{
"definition": null,
"family_id": null,
"name": null
}
]
| no | +| [iam\_account\_groups](#input\_iam\_account\_groups) | List of objects with group name and entitlements for this group |
list(object({
group_name = optional(string)
entitlements = optional(list(string))
}))
| `[]` | no | +| [iam\_workspace\_groups](#input\_iam\_workspace\_groups) | Used to create workspace group. Map of group name and its parameters, such as users and service principals added to the group. Also possible to configure group entitlements. |
map(object({
user = optional(list(string))
service_principal = optional(list(string))
entitlements = optional(list(string))
}))
| `{}` | no | +| [ip\_addresses](#input\_ip\_addresses) | A map of IP address ranges | `map(string)` |
{
"all": "0.0.0.0/0"
}
| no | +| [key\_vault\_secret\_scope](#input\_key\_vault\_secret\_scope) | Object with Azure Key Vault parameters required for creation of Azure-backed Databricks Secret scope |
list(object({
name = string
key_vault_id = string
dns_name = string
tenant_id = string
}))
| `[]` | no |
+| [lakebase\_instance](#input\_lakebase\_instance) | Map of objects with parameters to configure and deploy OLTP database instances in Databricks.
To deploy and use an OLTP database instance in Databricks:
- You must be a Databricks workspace owner.
- A Databricks workspace must already be deployed in your cloud environment (e.g., AWS or Azure).
- The workspace must be on the Premium plan or above.
- You must enable the "Lakebase: Managed Postgres OLTP Database" feature in the Preview features section.
- Database instances can only be deleted manually through the Databricks UI or using the Databricks CLI with the --purge option. |
map(object({
name = string
capacity = optional(string, "CU_1")
node_count = optional(number, 1)
enable_readable_secondaries = optional(bool, false)
retention_window_in_days = optional(number, 7)
}))
| `{}` | no | +| [mount\_configuration](#input\_mount\_configuration) | Configuration for mounting storage, including only service principal details |
object({
service_principal = object({
client_id = string
client_secret = string
tenant_id = string
})
})
|
{
"service_principal": {
"client_id": null,
"client_secret": null,
"tenant_id": null
}
}
| no |
| [mount\_enabled](#input\_mount\_enabled) | Boolean flag that determines whether mount point for storage account filesystem is created | `bool` | `false` | no |
| [mountpoints](#input\_mountpoints) | Mountpoints for databricks |
map(object({
storage_account_name = string
container_name = string
}))
| `{}` | no | +| [mountpoints](#input\_mountpoints) | Mountpoints for databricks |
map(object({
storage_account_name = string
container_name = string
}))
| `{}` | no |
| [pat\_token\_lifetime\_seconds](#input\_pat\_token\_lifetime\_seconds) | The lifetime of the token, in seconds. If no lifetime is specified, the token remains valid indefinitely | `number` | `315569520` | no |
| [secret\_scope](#input\_secret\_scope) | Provides an ability to create a custom Secret Scope, store secrets in it, and assign ACLs for access management
scope\_name - name of Secret Scope to create;
acl - list of objects, where 'principal' custom group name, this group is created in 'Premium' module; 'permission' is one of "READ", "WRITE", "MANAGE";
secrets - list of objects, where object's 'key' param is created key name and 'string\_value' is a value for it; |
list(object({
scope_name = string
scope_acl = optional(list(object({
principal = string
permission = string
})))
secrets = optional(list(object({
key = string
string_value = string
})))
}))
| `[]` | no | -| [sql\_endpoint](#input\_sql\_endpoint) | Set of objects with parameters to configure SQL Endpoint and assign permissions to it for certain custom groups |
set(object({
name = string
cluster_size = optional(string, "2X-Small")
min_num_clusters = optional(number, 0)
max_num_clusters = optional(number, 1)
auto_stop_mins = optional(string, "30")
enable_photon = optional(bool, false)
enable_serverless_compute = optional(bool, false)
spot_instance_policy = optional(string, "COST_OPTIMIZED")
warehouse_type = optional(string, "PRO")
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
}))
| `[]` | no | +| [secret\_scope](#input\_secret\_scope) | Provides an ability to create custom Secret Scope, store secrets in it and assigning ACL for access management
scope\_name - name of Secret Scope to create;
acl - list of objects, where 'principal' is a custom group name (these groups are created in the 'Premium' module) and 'permission' is one of "READ", "WRITE", "MANAGE";
secrets - list of objects, where the 'key' param is the created key name and 'string\_value' is its value; |
list(object({
scope_name = string
scope_acl = optional(list(object({
principal = string
permission = string
})))
secrets = optional(list(object({
key = string
string_value = string
})))
}))
| `[]` | no | +| [sql\_endpoint](#input\_sql\_endpoint) | Set of objects with parameters to configure SQL Endpoint and assign permissions to it for certain custom groups |
set(object({
name = string
cluster_size = optional(string, "2X-Small")
min_num_clusters = optional(number, 0)
max_num_clusters = optional(number, 1)
auto_stop_mins = optional(string, "30")
enable_photon = optional(bool, false)
enable_serverless_compute = optional(bool, false)
spot_instance_policy = optional(string, "COST_OPTIMIZED")
warehouse_type = optional(string, "PRO")
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
}))
| `[]` | no |
| [suffix](#input\_suffix) | Optional suffix that would be added to the end of resource names. | `string` | `""` | no |
| [system\_schemas](#input\_system\_schemas) | Set of strings with all possible System Schema names | `set(string)` |
[
"access",
"compute",
"marketplace",
"storage",
"serving",
"query",
"lakeflow"
]
| no | +| [system\_schemas](#input\_system\_schemas) | Set of strings with all possible System Schema names | `set(string)` |
[
"access",
"compute",
"marketplace",
"storage",
"serving",
"query",
"lakeflow"
]
| no |
| [system\_schemas\_enabled](#input\_system\_schemas\_enabled) | System Schemas only works with an assigned Unity Catalog Metastore. Boolean flag to enable this feature | `bool` | `false` | no |
| [workspace\_admin\_token\_enabled](#input\_workspace\_admin\_token\_enabled) | Boolean flag to specify whether to create Workspace Admin Token | `bool` | n/a | yes |

diff --git a/lakebase.tf b/lakebase.tf
new file mode 100644
index 0000000..bf54323
--- /dev/null
+++ b/lakebase.tf
@@ -0,0 +1,9 @@
+resource "databricks_database_instance" "this" {
+  for_each = var.lakebase_instance
+
+  name                        = each.value.name
+  capacity                    = each.value.capacity
+  node_count                  = each.value.node_count
+  enable_readable_secondaries = each.value.enable_readable_secondaries
+  retention_window_in_days    = each.value.retention_window_in_days
+}

diff --git a/variables.tf b/variables.tf
index daa0954..15e4f59 100644
--- a/variables.tf
+++ b/variables.tf
@@ -272,3 +272,23 @@ variable "ip_addresses" {
     "all" = "0.0.0.0/0"
   }
 }
+
+variable "lakebase_instance" {
+  type = map(object({
+    name                        = string
+    capacity                    = optional(string, "CU_1")
+    node_count                  = optional(number, 1)
+    enable_readable_secondaries = optional(bool, false)
+    retention_window_in_days    = optional(number, 7)
+  }))
+  default     = {}
+  description = <<EOT
+Map of objects with parameters to configure and deploy OLTP database instances in Databricks.
+To deploy and use an OLTP database instance in Databricks:
+- You must be a Databricks workspace owner.
+- A Databricks workspace must already be deployed in your cloud environment (e.g., AWS or Azure).
+- The workspace must be on the Premium plan or above.
+- You must enable the "Lakebase: Managed Postgres OLTP Database" feature in the Preview features section.
+- Database instances can only be deleted manually through the Databricks UI or using the Databricks CLI with the --purge option.
+EOT
+}

diff --git a/versions.tf b/versions.tf
--- a/versions.tf
+++ b/versions.tf
-      version = "~>1.0"
+      version = ">=1.85.0"

Date: Thu, 24 Jul 2025 13:19:07 +0000
Subject: [PATCH 2/2] terraform-docs: automated action

---
 README.md | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/README.md b/README.md
index bb1f358..3548c60 100644
--- a/README.md
+++ b/README.md
@@ -399,23 +399,23 @@

 | Name | Description | Type | Default | Required |
 |------|-------------|------|---------|:--------:|
 | [cloud\_name](#input\_cloud\_name) | Cloud Name | `string` | n/a | yes |
-| [clusters](#input\_clusters) | Set of objects with parameters to configure Databricks clusters and assign permissions to it for certain custom groups |
set(object({
cluster_name = string
spark_version = optional(string, "15.3.x-scala2.12")
spark_conf = optional(map(any), {})
spark_env_vars = optional(map(any), {})
data_security_mode = optional(string, "USER_ISOLATION")
aws_attributes = optional(object({
availability = optional(string)
zone_id = optional(string)
first_on_demand = optional(number)
spot_bid_price_percent = optional(number)
ebs_volume_count = optional(number)
ebs_volume_size = optional(number)
ebs_volume_type = optional(string)
}), {
availability = "ON_DEMAND"
zone_id = "auto"
first_on_demand = 0
spot_bid_price_percent = 100
ebs_volume_count = 1
ebs_volume_size = 100
ebs_volume_type = "GENERAL_PURPOSE_SSD"
})
azure_attributes = optional(object({
availability = optional(string)
first_on_demand = optional(number)
spot_bid_max_price = optional(number, 1)
}), {
availability = "ON_DEMAND_AZURE"
first_on_demand = 0
})
node_type_id = optional(string, null)
autotermination_minutes = optional(number, 20)
min_workers = optional(number, 1)
max_workers = optional(number, 2)
cluster_log_conf_destination = optional(string, null)
init_scripts_workspace = optional(set(string), [])
init_scripts_volumes = optional(set(string), [])
init_scripts_dbfs = optional(set(string), [])
init_scripts_abfss = optional(set(string), [])
single_user_name = optional(string, null)
single_node_enable = optional(bool, false)
custom_tags = optional(map(string), {})
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
pypi_library_repository = optional(set(string), [])
maven_library_repository = optional(set(object({
coordinates = string
exclusions = set(string)
})), [])
}))
| `[]` | no | -| [custom\_cluster\_policies](#input\_custom\_cluster\_policies) | Provides an ability to create custom cluster policy, assign it to cluster and grant CAN\_USE permissions on it to certain custom groups
name - name of custom cluster policy to create
can\_use - list of string, where values are custom group names, there groups have to be created with Terraform;
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
can_use = list(string)
definition = any
}))
|
[
{
"can_use": null,
"definition": null,
"name": null
}
]
| no | -| [custom\_config](#input\_custom\_config) | Map of AD databricks workspace custom config | `map(string)` |
{
"enable-X-Content-Type-Options": "true",
"enable-X-Frame-Options": "true",
"enable-X-XSS-Protection": "true",
"enableDbfsFileBrowser": "false",
"enableExportNotebook": "false",
"enableIpAccessLists": "true",
"enableNotebookTableClipboard": "false",
"enableResultsDownloading": "false",
"enableUploadDataUis": "false",
"enableVerboseAuditLogs": "true",
"enforceUserIsolation": "true",
"storeInteractiveNotebookResultsInCustomerAccount": "true"
}
| no | -| [default\_cluster\_policies\_override](#input\_default\_cluster\_policies\_override) | Provides an ability to override default cluster policy
name - name of cluster policy to override
family\_id - family id of corresponding policy
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
family_id = string
definition = any
}))
|
[
{
"definition": null,
"family_id": null,
"name": null
}
]
| no | -| [iam\_account\_groups](#input\_iam\_account\_groups) | List of objects with group name and entitlements for this group |
list(object({
group_name = optional(string)
entitlements = optional(list(string))
}))
| `[]` | no | -| [iam\_workspace\_groups](#input\_iam\_workspace\_groups) | Used to create workspace group. Map of group name and its parameters, such as users and service principals added to the group. Also possible to configure group entitlements. |
map(object({
user = optional(list(string))
service_principal = optional(list(string))
entitlements = optional(list(string))
}))
| `{}` | no | -| [ip\_addresses](#input\_ip\_addresses) | A map of IP address ranges | `map(string)` |
{
"all": "0.0.0.0/0"
}
| no | -| [key\_vault\_secret\_scope](#input\_key\_vault\_secret\_scope) | Object with Azure Key Vault parameters required for creation of Azure-backed Databricks Secret scope |
list(object({
name = string
key_vault_id = string
dns_name = string
tenant_id = string
}))
| `[]` | no | -| [lakebase\_instance](#input\_lakebase\_instance) | Map of objects with parameters to configure and deploy OLTP database instances in Databricks.
To deploy and use an OLTP database instance in Databricks:
- You must be a Databricks workspace owner.
- A Databricks workspace must already be deployed in your cloud environment (e.g., AWS or Azure).
- The workspace must be on the Premium plan or above.
- You must enable the "Lakebase: Managed Postgres OLTP Database" feature in the Preview features section.
- Database instances can only be deleted manually through the Databricks UI or using the Databricks CLI with the --purge option. |
map(object({
name = string
capacity = optional(string, "CU_1")
node_count = optional(number, 1)
enable_readable_secondaries = optional(bool, false)
retention_window_in_days = optional(number, 7)
}))
| `{}` | no | -| [mount\_configuration](#input\_mount\_configuration) | Configuration for mounting storage, including only service principal details |
object({
service_principal = object({
client_id = string
client_secret = string
tenant_id = string
})
})
|
{
"service_principal": {
"client_id": null,
"client_secret": null,
"tenant_id": null
}
}
| no |
+| [clusters](#input\_clusters) | Set of objects with parameters to configure Databricks clusters and assign permissions to them for certain custom groups |
set(object({
cluster_name = string
spark_version = optional(string, "15.3.x-scala2.12")
spark_conf = optional(map(any), {})
spark_env_vars = optional(map(any), {})
data_security_mode = optional(string, "USER_ISOLATION")
aws_attributes = optional(object({
availability = optional(string)
zone_id = optional(string)
first_on_demand = optional(number)
spot_bid_price_percent = optional(number)
ebs_volume_count = optional(number)
ebs_volume_size = optional(number)
ebs_volume_type = optional(string)
}), {
availability = "ON_DEMAND"
zone_id = "auto"
first_on_demand = 0
spot_bid_price_percent = 100
ebs_volume_count = 1
ebs_volume_size = 100
ebs_volume_type = "GENERAL_PURPOSE_SSD"
})
azure_attributes = optional(object({
availability = optional(string)
first_on_demand = optional(number)
spot_bid_max_price = optional(number, 1)
}), {
availability = "ON_DEMAND_AZURE"
first_on_demand = 0
})
node_type_id = optional(string, null)
autotermination_minutes = optional(number, 20)
min_workers = optional(number, 1)
max_workers = optional(number, 2)
cluster_log_conf_destination = optional(string, null)
init_scripts_workspace = optional(set(string), [])
init_scripts_volumes = optional(set(string), [])
init_scripts_dbfs = optional(set(string), [])
init_scripts_abfss = optional(set(string), [])
single_user_name = optional(string, null)
single_node_enable = optional(bool, false)
custom_tags = optional(map(string), {})
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
pypi_library_repository = optional(set(string), [])
maven_library_repository = optional(set(object({
coordinates = string
exclusions = set(string)
})), [])
}))
| `[]` | no |
+| [custom\_cluster\_policies](#input\_custom\_cluster\_policies) | Provides an ability to create a custom cluster policy, assign it to a cluster, and grant CAN\_USE permissions on it to certain custom groups
name - name of custom cluster policy to create
can\_use - list of strings, where values are custom group names; these groups have to be created with Terraform;
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
can_use = list(string)
definition = any
}))
|
[
{
"can_use": null,
"definition": null,
"name": null
}
]
| no | +| [custom\_config](#input\_custom\_config) | Map of AD databricks workspace custom config | `map(string)` |
{
"enable-X-Content-Type-Options": "true",
"enable-X-Frame-Options": "true",
"enable-X-XSS-Protection": "true",
"enableDbfsFileBrowser": "false",
"enableExportNotebook": "false",
"enableIpAccessLists": "true",
"enableNotebookTableClipboard": "false",
"enableResultsDownloading": "false",
"enableUploadDataUis": "false",
"enableVerboseAuditLogs": "true",
"enforceUserIsolation": "true",
"storeInteractiveNotebookResultsInCustomerAccount": "true"
}
| no | +| [default\_cluster\_policies\_override](#input\_default\_cluster\_policies\_override) | Provides an ability to override default cluster policy
name - name of cluster policy to override
family\_id - family id of corresponding policy
definition - JSON document expressed in Databricks Policy Definition Language. No need to call 'jsonencode()' function on it when providing a value; |
list(object({
name = string
family_id = string
definition = any
}))
|
[
{
"definition": null,
"family_id": null,
"name": null
}
]
| no | +| [iam\_account\_groups](#input\_iam\_account\_groups) | List of objects with group name and entitlements for this group |
list(object({
group_name = optional(string)
entitlements = optional(list(string))
}))
| `[]` | no | +| [iam\_workspace\_groups](#input\_iam\_workspace\_groups) | Used to create workspace group. Map of group name and its parameters, such as users and service principals added to the group. Also possible to configure group entitlements. |
map(object({
user = optional(list(string))
service_principal = optional(list(string))
entitlements = optional(list(string))
}))
| `{}` | no | +| [ip\_addresses](#input\_ip\_addresses) | A map of IP address ranges | `map(string)` |
{
"all": "0.0.0.0/0"
}
| no | +| [key\_vault\_secret\_scope](#input\_key\_vault\_secret\_scope) | Object with Azure Key Vault parameters required for creation of Azure-backed Databricks Secret scope |
list(object({
name = string
key_vault_id = string
dns_name = string
tenant_id = string
}))
| `[]` | no |
+| [lakebase\_instance](#input\_lakebase\_instance) | Map of objects with parameters to configure and deploy OLTP database instances in Databricks.
To deploy and use an OLTP database instance in Databricks:
- You must be a Databricks workspace owner.
- A Databricks workspace must already be deployed in your cloud environment (e.g., AWS or Azure).
- The workspace must be on the Premium plan or above.
- You must enable the "Lakebase: Managed Postgres OLTP Database" feature in the Preview features section.
- Database instances can only be deleted manually through the Databricks UI or using the Databricks CLI with the --purge option. |
map(object({
name = string
capacity = optional(string, "CU_1")
node_count = optional(number, 1)
enable_readable_secondaries = optional(bool, false)
retention_window_in_days = optional(number, 7)
}))
| `{}` | no | +| [mount\_configuration](#input\_mount\_configuration) | Configuration for mounting storage, including only service principal details |
object({
service_principal = object({
client_id = string
client_secret = string
tenant_id = string
})
})
|
{
"service_principal": {
"client_id": null,
"client_secret": null,
"tenant_id": null
}
}
| no |
| [mount\_enabled](#input\_mount\_enabled) | Boolean flag that determines whether mount point for storage account filesystem is created | `bool` | `false` | no |
| [mountpoints](#input\_mountpoints) | Mountpoints for databricks |
map(object({
storage_account_name = string
container_name = string
}))
| `{}` | no | +| [mountpoints](#input\_mountpoints) | Mountpoints for databricks |
map(object({
storage_account_name = string
container_name = string
}))
| `{}` | no |
| [pat\_token\_lifetime\_seconds](#input\_pat\_token\_lifetime\_seconds) | The lifetime of the token, in seconds. If no lifetime is specified, the token remains valid indefinitely | `number` | `315569520` | no |
| [secret\_scope](#input\_secret\_scope) | Provides an ability to create a custom Secret Scope, store secrets in it, and assign ACLs for access management
scope\_name - name of Secret Scope to create;
acl - list of objects, where 'principal' custom group name, this group is created in 'Premium' module; 'permission' is one of "READ", "WRITE", "MANAGE";
secrets - list of objects, where object's 'key' param is created key name and 'string\_value' is a value for it; |
list(object({
scope_name = string
scope_acl = optional(list(object({
principal = string
permission = string
})))
secrets = optional(list(object({
key = string
string_value = string
})))
}))
| `[]` | no | -| [sql\_endpoint](#input\_sql\_endpoint) | Set of objects with parameters to configure SQL Endpoint and assign permissions to it for certain custom groups |
set(object({
name = string
cluster_size = optional(string, "2X-Small")
min_num_clusters = optional(number, 0)
max_num_clusters = optional(number, 1)
auto_stop_mins = optional(string, "30")
enable_photon = optional(bool, false)
enable_serverless_compute = optional(bool, false)
spot_instance_policy = optional(string, "COST_OPTIMIZED")
warehouse_type = optional(string, "PRO")
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
}))
| `[]` | no | +| [secret\_scope](#input\_secret\_scope) | Provides an ability to create custom Secret Scope, store secrets in it and assigning ACL for access management
scope\_name - name of Secret Scope to create;
acl - list of objects, where 'principal' is a custom group name (these groups are created in the 'Premium' module) and 'permission' is one of "READ", "WRITE", "MANAGE";
secrets - list of objects, where the 'key' param is the created key name and 'string\_value' is its value; |
list(object({
scope_name = string
scope_acl = optional(list(object({
principal = string
permission = string
})))
secrets = optional(list(object({
key = string
string_value = string
})))
}))
| `[]` | no | +| [sql\_endpoint](#input\_sql\_endpoint) | Set of objects with parameters to configure SQL Endpoint and assign permissions to it for certain custom groups |
set(object({
name = string
cluster_size = optional(string, "2X-Small")
min_num_clusters = optional(number, 0)
max_num_clusters = optional(number, 1)
auto_stop_mins = optional(string, "30")
enable_photon = optional(bool, false)
enable_serverless_compute = optional(bool, false)
spot_instance_policy = optional(string, "COST_OPTIMIZED")
warehouse_type = optional(string, "PRO")
permissions = optional(set(object({
group_name = string
permission_level = string
})), [])
}))
| `[]` | no |
| [suffix](#input\_suffix) | Optional suffix that would be added to the end of resource names. | `string` | `""` | no |
| [system\_schemas](#input\_system\_schemas) | Set of strings with all possible System Schema names | `set(string)` |
[
"access",
"compute",
"marketplace",
"storage",
"serving",
"query",
"lakeflow"
]
| no | +| [system\_schemas](#input\_system\_schemas) | Set of strings with all possible System Schema names | `set(string)` |
[
"access",
"compute",
"marketplace",
"storage",
"serving",
"query",
"lakeflow"
]
| no |
| [system\_schemas\_enabled](#input\_system\_schemas\_enabled) | System Schemas only works with an assigned Unity Catalog Metastore. Boolean flag to enable this feature | `bool` | `false` | no |
| [workspace\_admin\_token\_enabled](#input\_workspace\_admin\_token\_enabled) | Boolean flag to specify whether to create Workspace Admin Token | `bool` | n/a | yes |
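Usage sketch (not part of the patch series): the block below shows how the new `lakebase_instance` input could be passed to this module from a root configuration. The module label, source path, and all values are hypothetical placeholders; only the attribute names, types, and defaults come from the `lakebase_instance` variable added in variables.tf above, and `cloud_name`/`workspace_admin_token_enabled` are the module's two required inputs per the README table.

```hcl
# Hypothetical root-module call; source path and values are placeholders.
module "databricks_runtime" {
  source = "../terraform-databricks-runtime" # assumption: local checkout of this module

  cloud_name                    = "azure"
  workspace_admin_token_enabled = false

  # Each map entry becomes one databricks_database_instance via for_each.
  lakebase_instance = {
    main = {
      name = "oltp-main" # only 'name' is required; other attributes use the defaults
    }
    analytics = {
      name                        = "oltp-analytics"
      capacity                    = "CU_2" # assumption: a larger capacity tier; default is "CU_1"
      node_count                  = 2      # default: 1
      enable_readable_secondaries = true   # default: false
      retention_window_in_days    = 14     # default: 7
    }
  }
}
```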
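Provider-constraint note: the requirement bump from `~>1.0` to `>=1.85.0` is what makes `databricks_database_instance` resolvable; the old `~>1.0` range already admits 1.85+, so the bump mainly documents the floor this resource needs. A minimal `versions.tf` compatible with the patched module might look like the sketch below; the repository's actual file is only partially visible in the patch, so treat the surrounding layout as an assumption.

```hcl
terraform {
  required_version = ">=1.3" # matches the README requirements table

  required_providers {
    databricks = {
      # Floor raised because databricks_database_instance only ships
      # in newer provider releases.
      source  = "databricks/databricks"
      version = ">=1.85.0"
    }
  }
}
```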
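Design note on the `for_each` keying (general Terraform behavior, not something this patch changes): the map key, not the `name` attribute, forms the resource address, so renaming a key plans a destroy-and-recreate even when `name` is unchanged — worth care here, since the variable description says instances are only fully removed manually or with `--purge`. If a key must be renamed, a `moved` block preserves state; the keys below refer to the hypothetical example above.

```hcl
# Hypothetical: keep the existing instance when renaming the map key
# "main" to "primary" in var.lakebase_instance.
moved {
  from = databricks_database_instance.this["main"]
  to   = databricks_database_instance.this["primary"]
}
```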