diff --git a/acceptance/bundle/integration_whl/interactive_cluster/out.test.toml b/acceptance/bundle/integration_whl/interactive_cluster/out.test.toml index 1ae7a3995d..601e190219 100644 --- a/acceptance/bundle/integration_whl/interactive_cluster/out.test.toml +++ b/acceptance/bundle/integration_whl/interactive_cluster/out.test.toml @@ -3,4 +3,4 @@ Cloud = true CloudSlow = true [EnvMatrix] - DATABRICKS_BUNDLE_ENGINE = ["terraform"] + DATABRICKS_BUNDLE_ENGINE = ["terraform", "direct-exp"] diff --git a/acceptance/bundle/integration_whl/interactive_cluster/test.toml b/acceptance/bundle/integration_whl/interactive_cluster/test.toml deleted file mode 100644 index 4b60f66ec8..0000000000 --- a/acceptance/bundle/integration_whl/interactive_cluster/test.toml +++ /dev/null @@ -1 +0,0 @@ -EnvMatrix.DATABRICKS_BUNDLE_ENGINE = ["terraform"] # clusters resource diff --git a/acceptance/bundle/integration_whl/interactive_cluster_dynamic_version/out.test.toml b/acceptance/bundle/integration_whl/interactive_cluster_dynamic_version/out.test.toml index f97d9ed887..e31a903bd2 100644 --- a/acceptance/bundle/integration_whl/interactive_cluster_dynamic_version/out.test.toml +++ b/acceptance/bundle/integration_whl/interactive_cluster_dynamic_version/out.test.toml @@ -3,5 +3,5 @@ Cloud = true CloudSlow = true [EnvMatrix] - DATABRICKS_BUNDLE_ENGINE = ["terraform"] + DATABRICKS_BUNDLE_ENGINE = ["terraform", "direct-exp"] DATA_SECURITY_MODE = ["USER_ISOLATION", "SINGLE_USER"] diff --git a/acceptance/bundle/integration_whl/interactive_cluster_dynamic_version/test.toml b/acceptance/bundle/integration_whl/interactive_cluster_dynamic_version/test.toml index 01ca313e09..35f395f3c4 100644 --- a/acceptance/bundle/integration_whl/interactive_cluster_dynamic_version/test.toml +++ b/acceptance/bundle/integration_whl/interactive_cluster_dynamic_version/test.toml @@ -1,5 +1,3 @@ -EnvMatrix.DATABRICKS_BUNDLE_ENGINE = ["terraform"] # clusters not implemented - [EnvMatrix] DATA_SECURITY_MODE = [ "USER_ISOLATION", diff --git a/acceptance/bundle/integration_whl/interactive_single_user/out.test.toml b/acceptance/bundle/integration_whl/interactive_single_user/out.test.toml index 1ae7a3995d..601e190219 100644 --- a/acceptance/bundle/integration_whl/interactive_single_user/out.test.toml +++ b/acceptance/bundle/integration_whl/interactive_single_user/out.test.toml @@ -3,4 +3,4 @@ Cloud = true CloudSlow = true [EnvMatrix] - DATABRICKS_BUNDLE_ENGINE = ["terraform"] + DATABRICKS_BUNDLE_ENGINE = ["terraform", "direct-exp"] diff --git a/acceptance/bundle/integration_whl/interactive_single_user/test.toml b/acceptance/bundle/integration_whl/interactive_single_user/test.toml deleted file mode 100644 index 4b60f66ec8..0000000000 --- a/acceptance/bundle/integration_whl/interactive_single_user/test.toml +++ /dev/null @@ -1 +0,0 @@ -EnvMatrix.DATABRICKS_BUNDLE_ENGINE = ["terraform"] # clusters resource diff --git a/acceptance/bundle/override/clusters/output.txt b/acceptance/bundle/override/clusters/output.txt index 7a35dfe256..1c26581d39 100644 --- a/acceptance/bundle/override/clusters/output.txt +++ b/acceptance/bundle/override/clusters/output.txt @@ -5,6 +5,7 @@ "max_workers": 7, "min_workers": 2 }, + "autotermination_minutes": 60, "cluster_name": "foo", "node_type_id": "[NODE_TYPE_ID]", "num_workers": 2, @@ -20,6 +21,7 @@ "max_workers": 3, "min_workers": 1 }, + "autotermination_minutes": 60, "cluster_name": "foo-override", "node_type_id": "m5.xlarge", "num_workers": 3, diff --git a/acceptance/bundle/override/merge-string-map/output.txt 
b/acceptance/bundle/override/merge-string-map/output.txt index 20c3bcddc1..63a7a4d769 100644 --- a/acceptance/bundle/override/merge-string-map/output.txt +++ b/acceptance/bundle/override/merge-string-map/output.txt @@ -7,6 +7,7 @@ Warning: expected map, found string { "clusters": { "my_cluster": { + "autotermination_minutes": 60, "spark_version": "25" } } diff --git a/acceptance/bundle/refschema/out.fields.txt b/acceptance/bundle/refschema/out.fields.txt index 92b66ab7ef..f2a85bdd9f 100644 --- a/acceptance/bundle/refschema/out.fields.txt +++ b/acceptance/bundle/refschema/out.fields.txt @@ -140,6 +140,262 @@ resources.apps.*.updater string ALL resources.apps.*.url string ALL resources.apps.*.user_api_scopes []string ALL resources.apps.*.user_api_scopes[*] string ALL +resources.clusters.*.apply_policy_default_values bool INPUT STATE +resources.clusters.*.autoscale *compute.AutoScale ALL +resources.clusters.*.autoscale.max_workers int ALL +resources.clusters.*.autoscale.min_workers int ALL +resources.clusters.*.autotermination_minutes int ALL +resources.clusters.*.aws_attributes *compute.AwsAttributes ALL +resources.clusters.*.aws_attributes.availability compute.AwsAvailability ALL +resources.clusters.*.aws_attributes.ebs_volume_count int ALL +resources.clusters.*.aws_attributes.ebs_volume_iops int ALL +resources.clusters.*.aws_attributes.ebs_volume_size int ALL +resources.clusters.*.aws_attributes.ebs_volume_throughput int ALL +resources.clusters.*.aws_attributes.ebs_volume_type compute.EbsVolumeType ALL +resources.clusters.*.aws_attributes.first_on_demand int ALL +resources.clusters.*.aws_attributes.instance_profile_arn string ALL +resources.clusters.*.aws_attributes.spot_bid_price_percent int ALL +resources.clusters.*.aws_attributes.zone_id string ALL +resources.clusters.*.azure_attributes *compute.AzureAttributes ALL +resources.clusters.*.azure_attributes.availability compute.AzureAvailability ALL +resources.clusters.*.azure_attributes.first_on_demand int ALL +resources.clusters.*.azure_attributes.log_analytics_info *compute.LogAnalyticsInfo ALL +resources.clusters.*.azure_attributes.log_analytics_info.log_analytics_primary_key string ALL +resources.clusters.*.azure_attributes.log_analytics_info.log_analytics_workspace_id string ALL +resources.clusters.*.azure_attributes.spot_bid_max_price float64 ALL +resources.clusters.*.cluster_cores float64 REMOTE +resources.clusters.*.cluster_id string REMOTE +resources.clusters.*.cluster_log_conf *compute.ClusterLogConf ALL +resources.clusters.*.cluster_log_conf.dbfs *compute.DbfsStorageInfo ALL +resources.clusters.*.cluster_log_conf.dbfs.destination string ALL +resources.clusters.*.cluster_log_conf.s3 *compute.S3StorageInfo ALL +resources.clusters.*.cluster_log_conf.s3.canned_acl string ALL +resources.clusters.*.cluster_log_conf.s3.destination string ALL +resources.clusters.*.cluster_log_conf.s3.enable_encryption bool ALL +resources.clusters.*.cluster_log_conf.s3.encryption_type string ALL +resources.clusters.*.cluster_log_conf.s3.endpoint string ALL +resources.clusters.*.cluster_log_conf.s3.kms_key string ALL +resources.clusters.*.cluster_log_conf.s3.region string ALL +resources.clusters.*.cluster_log_conf.volumes *compute.VolumesStorageInfo ALL +resources.clusters.*.cluster_log_conf.volumes.destination string ALL +resources.clusters.*.cluster_log_status *compute.LogSyncStatus REMOTE +resources.clusters.*.cluster_log_status.last_attempted int64 REMOTE +resources.clusters.*.cluster_log_status.last_exception string REMOTE 
+resources.clusters.*.cluster_memory_mb int64 REMOTE +resources.clusters.*.cluster_name string ALL +resources.clusters.*.cluster_source compute.ClusterSource REMOTE +resources.clusters.*.creator_user_name string REMOTE +resources.clusters.*.custom_tags map[string]string ALL +resources.clusters.*.custom_tags.* string ALL +resources.clusters.*.data_security_mode compute.DataSecurityMode ALL +resources.clusters.*.default_tags map[string]string REMOTE +resources.clusters.*.default_tags.* string REMOTE +resources.clusters.*.docker_image *compute.DockerImage ALL +resources.clusters.*.docker_image.basic_auth *compute.DockerBasicAuth ALL +resources.clusters.*.docker_image.basic_auth.password string ALL +resources.clusters.*.docker_image.basic_auth.username string ALL +resources.clusters.*.docker_image.url string ALL +resources.clusters.*.driver *compute.SparkNode REMOTE +resources.clusters.*.driver.host_private_ip string REMOTE +resources.clusters.*.driver.instance_id string REMOTE +resources.clusters.*.driver.node_aws_attributes *compute.SparkNodeAwsAttributes REMOTE +resources.clusters.*.driver.node_aws_attributes.is_spot bool REMOTE +resources.clusters.*.driver.node_id string REMOTE +resources.clusters.*.driver.private_ip string REMOTE +resources.clusters.*.driver.public_dns string REMOTE +resources.clusters.*.driver.start_timestamp int64 REMOTE +resources.clusters.*.driver_instance_pool_id string ALL +resources.clusters.*.driver_node_type_id string ALL +resources.clusters.*.enable_elastic_disk bool ALL +resources.clusters.*.enable_local_disk_encryption bool ALL +resources.clusters.*.executors []compute.SparkNode REMOTE +resources.clusters.*.executors[*] compute.SparkNode REMOTE +resources.clusters.*.executors[*].host_private_ip string REMOTE +resources.clusters.*.executors[*].instance_id string REMOTE +resources.clusters.*.executors[*].node_aws_attributes *compute.SparkNodeAwsAttributes REMOTE +resources.clusters.*.executors[*].node_aws_attributes.is_spot bool REMOTE +resources.clusters.*.executors[*].node_id string REMOTE +resources.clusters.*.executors[*].private_ip string REMOTE +resources.clusters.*.executors[*].public_dns string REMOTE +resources.clusters.*.executors[*].start_timestamp int64 REMOTE +resources.clusters.*.gcp_attributes *compute.GcpAttributes ALL +resources.clusters.*.gcp_attributes.availability compute.GcpAvailability ALL +resources.clusters.*.gcp_attributes.boot_disk_size int ALL +resources.clusters.*.gcp_attributes.first_on_demand int ALL +resources.clusters.*.gcp_attributes.google_service_account string ALL +resources.clusters.*.gcp_attributes.local_ssd_count int ALL +resources.clusters.*.gcp_attributes.use_preemptible_executors bool ALL +resources.clusters.*.gcp_attributes.zone_id string ALL +resources.clusters.*.id string INPUT +resources.clusters.*.init_scripts []compute.InitScriptInfo ALL +resources.clusters.*.init_scripts[*] compute.InitScriptInfo ALL +resources.clusters.*.init_scripts[*].abfss *compute.Adlsgen2Info ALL +resources.clusters.*.init_scripts[*].abfss.destination string ALL +resources.clusters.*.init_scripts[*].dbfs *compute.DbfsStorageInfo ALL +resources.clusters.*.init_scripts[*].dbfs.destination string ALL +resources.clusters.*.init_scripts[*].file *compute.LocalFileInfo ALL +resources.clusters.*.init_scripts[*].file.destination string ALL +resources.clusters.*.init_scripts[*].gcs *compute.GcsStorageInfo ALL +resources.clusters.*.init_scripts[*].gcs.destination string ALL +resources.clusters.*.init_scripts[*].s3 *compute.S3StorageInfo ALL 
+resources.clusters.*.init_scripts[*].s3.canned_acl string ALL +resources.clusters.*.init_scripts[*].s3.destination string ALL +resources.clusters.*.init_scripts[*].s3.enable_encryption bool ALL +resources.clusters.*.init_scripts[*].s3.encryption_type string ALL +resources.clusters.*.init_scripts[*].s3.endpoint string ALL +resources.clusters.*.init_scripts[*].s3.kms_key string ALL +resources.clusters.*.init_scripts[*].s3.region string ALL +resources.clusters.*.init_scripts[*].volumes *compute.VolumesStorageInfo ALL +resources.clusters.*.init_scripts[*].volumes.destination string ALL +resources.clusters.*.init_scripts[*].workspace *compute.WorkspaceStorageInfo ALL +resources.clusters.*.init_scripts[*].workspace.destination string ALL +resources.clusters.*.instance_pool_id string ALL +resources.clusters.*.is_single_node bool ALL +resources.clusters.*.jdbc_port int REMOTE +resources.clusters.*.kind compute.Kind ALL +resources.clusters.*.last_restarted_time int64 REMOTE +resources.clusters.*.last_state_loss_time int64 REMOTE +resources.clusters.*.lifecycle resources.Lifecycle INPUT +resources.clusters.*.lifecycle.prevent_destroy bool INPUT +resources.clusters.*.modified_status string INPUT +resources.clusters.*.node_type_id string ALL +resources.clusters.*.num_workers int ALL +resources.clusters.*.permissions []resources.ClusterPermission INPUT +resources.clusters.*.permissions[*] resources.ClusterPermission INPUT +resources.clusters.*.permissions[*].group_name string INPUT +resources.clusters.*.permissions[*].level resources.ClusterPermissionLevel INPUT +resources.clusters.*.permissions[*].service_principal_name string INPUT +resources.clusters.*.permissions[*].user_name string INPUT +resources.clusters.*.policy_id string ALL +resources.clusters.*.remote_disk_throughput int ALL +resources.clusters.*.runtime_engine compute.RuntimeEngine ALL +resources.clusters.*.single_user_name string ALL +resources.clusters.*.spark_conf map[string]string ALL +resources.clusters.*.spark_conf.* string ALL +resources.clusters.*.spark_context_id int64 REMOTE +resources.clusters.*.spark_env_vars map[string]string ALL +resources.clusters.*.spark_env_vars.* string ALL +resources.clusters.*.spark_version string ALL +resources.clusters.*.spec *compute.ClusterSpec REMOTE +resources.clusters.*.spec.apply_policy_default_values bool REMOTE +resources.clusters.*.spec.autoscale *compute.AutoScale REMOTE +resources.clusters.*.spec.autoscale.max_workers int REMOTE +resources.clusters.*.spec.autoscale.min_workers int REMOTE +resources.clusters.*.spec.autotermination_minutes int REMOTE +resources.clusters.*.spec.aws_attributes *compute.AwsAttributes REMOTE +resources.clusters.*.spec.aws_attributes.availability compute.AwsAvailability REMOTE +resources.clusters.*.spec.aws_attributes.ebs_volume_count int REMOTE +resources.clusters.*.spec.aws_attributes.ebs_volume_iops int REMOTE +resources.clusters.*.spec.aws_attributes.ebs_volume_size int REMOTE +resources.clusters.*.spec.aws_attributes.ebs_volume_throughput int REMOTE +resources.clusters.*.spec.aws_attributes.ebs_volume_type compute.EbsVolumeType REMOTE +resources.clusters.*.spec.aws_attributes.first_on_demand int REMOTE +resources.clusters.*.spec.aws_attributes.instance_profile_arn string REMOTE +resources.clusters.*.spec.aws_attributes.spot_bid_price_percent int REMOTE +resources.clusters.*.spec.aws_attributes.zone_id string REMOTE +resources.clusters.*.spec.azure_attributes *compute.AzureAttributes REMOTE +resources.clusters.*.spec.azure_attributes.availability 
compute.AzureAvailability REMOTE +resources.clusters.*.spec.azure_attributes.first_on_demand int REMOTE +resources.clusters.*.spec.azure_attributes.log_analytics_info *compute.LogAnalyticsInfo REMOTE +resources.clusters.*.spec.azure_attributes.log_analytics_info.log_analytics_primary_key string REMOTE +resources.clusters.*.spec.azure_attributes.log_analytics_info.log_analytics_workspace_id string REMOTE +resources.clusters.*.spec.azure_attributes.spot_bid_max_price float64 REMOTE +resources.clusters.*.spec.cluster_log_conf *compute.ClusterLogConf REMOTE +resources.clusters.*.spec.cluster_log_conf.dbfs *compute.DbfsStorageInfo REMOTE +resources.clusters.*.spec.cluster_log_conf.dbfs.destination string REMOTE +resources.clusters.*.spec.cluster_log_conf.s3 *compute.S3StorageInfo REMOTE +resources.clusters.*.spec.cluster_log_conf.s3.canned_acl string REMOTE +resources.clusters.*.spec.cluster_log_conf.s3.destination string REMOTE +resources.clusters.*.spec.cluster_log_conf.s3.enable_encryption bool REMOTE +resources.clusters.*.spec.cluster_log_conf.s3.encryption_type string REMOTE +resources.clusters.*.spec.cluster_log_conf.s3.endpoint string REMOTE +resources.clusters.*.spec.cluster_log_conf.s3.kms_key string REMOTE +resources.clusters.*.spec.cluster_log_conf.s3.region string REMOTE +resources.clusters.*.spec.cluster_log_conf.volumes *compute.VolumesStorageInfo REMOTE +resources.clusters.*.spec.cluster_log_conf.volumes.destination string REMOTE +resources.clusters.*.spec.cluster_name string REMOTE +resources.clusters.*.spec.custom_tags map[string]string REMOTE +resources.clusters.*.spec.custom_tags.* string REMOTE +resources.clusters.*.spec.data_security_mode compute.DataSecurityMode REMOTE +resources.clusters.*.spec.docker_image *compute.DockerImage REMOTE +resources.clusters.*.spec.docker_image.basic_auth *compute.DockerBasicAuth REMOTE +resources.clusters.*.spec.docker_image.basic_auth.password string REMOTE +resources.clusters.*.spec.docker_image.basic_auth.username string REMOTE +resources.clusters.*.spec.docker_image.url string REMOTE +resources.clusters.*.spec.driver_instance_pool_id string REMOTE +resources.clusters.*.spec.driver_node_type_id string REMOTE +resources.clusters.*.spec.enable_elastic_disk bool REMOTE +resources.clusters.*.spec.enable_local_disk_encryption bool REMOTE +resources.clusters.*.spec.gcp_attributes *compute.GcpAttributes REMOTE +resources.clusters.*.spec.gcp_attributes.availability compute.GcpAvailability REMOTE +resources.clusters.*.spec.gcp_attributes.boot_disk_size int REMOTE +resources.clusters.*.spec.gcp_attributes.first_on_demand int REMOTE +resources.clusters.*.spec.gcp_attributes.google_service_account string REMOTE +resources.clusters.*.spec.gcp_attributes.local_ssd_count int REMOTE +resources.clusters.*.spec.gcp_attributes.use_preemptible_executors bool REMOTE +resources.clusters.*.spec.gcp_attributes.zone_id string REMOTE +resources.clusters.*.spec.init_scripts []compute.InitScriptInfo REMOTE +resources.clusters.*.spec.init_scripts[*] compute.InitScriptInfo REMOTE +resources.clusters.*.spec.init_scripts[*].abfss *compute.Adlsgen2Info REMOTE +resources.clusters.*.spec.init_scripts[*].abfss.destination string REMOTE +resources.clusters.*.spec.init_scripts[*].dbfs *compute.DbfsStorageInfo REMOTE +resources.clusters.*.spec.init_scripts[*].dbfs.destination string REMOTE +resources.clusters.*.spec.init_scripts[*].file *compute.LocalFileInfo REMOTE +resources.clusters.*.spec.init_scripts[*].file.destination string REMOTE 
+resources.clusters.*.spec.init_scripts[*].gcs *compute.GcsStorageInfo REMOTE +resources.clusters.*.spec.init_scripts[*].gcs.destination string REMOTE +resources.clusters.*.spec.init_scripts[*].s3 *compute.S3StorageInfo REMOTE +resources.clusters.*.spec.init_scripts[*].s3.canned_acl string REMOTE +resources.clusters.*.spec.init_scripts[*].s3.destination string REMOTE +resources.clusters.*.spec.init_scripts[*].s3.enable_encryption bool REMOTE +resources.clusters.*.spec.init_scripts[*].s3.encryption_type string REMOTE +resources.clusters.*.spec.init_scripts[*].s3.endpoint string REMOTE +resources.clusters.*.spec.init_scripts[*].s3.kms_key string REMOTE +resources.clusters.*.spec.init_scripts[*].s3.region string REMOTE +resources.clusters.*.spec.init_scripts[*].volumes *compute.VolumesStorageInfo REMOTE +resources.clusters.*.spec.init_scripts[*].volumes.destination string REMOTE +resources.clusters.*.spec.init_scripts[*].workspace *compute.WorkspaceStorageInfo REMOTE +resources.clusters.*.spec.init_scripts[*].workspace.destination string REMOTE +resources.clusters.*.spec.instance_pool_id string REMOTE +resources.clusters.*.spec.is_single_node bool REMOTE +resources.clusters.*.spec.kind compute.Kind REMOTE +resources.clusters.*.spec.node_type_id string REMOTE +resources.clusters.*.spec.num_workers int REMOTE +resources.clusters.*.spec.policy_id string REMOTE +resources.clusters.*.spec.remote_disk_throughput int REMOTE +resources.clusters.*.spec.runtime_engine compute.RuntimeEngine REMOTE +resources.clusters.*.spec.single_user_name string REMOTE +resources.clusters.*.spec.spark_conf map[string]string REMOTE +resources.clusters.*.spec.spark_conf.* string REMOTE +resources.clusters.*.spec.spark_env_vars map[string]string REMOTE +resources.clusters.*.spec.spark_env_vars.* string REMOTE +resources.clusters.*.spec.spark_version string REMOTE +resources.clusters.*.spec.ssh_public_keys []string REMOTE +resources.clusters.*.spec.ssh_public_keys[*] string REMOTE +resources.clusters.*.spec.total_initial_remote_disk_size int REMOTE +resources.clusters.*.spec.use_ml_runtime bool REMOTE +resources.clusters.*.spec.workload_type *compute.WorkloadType REMOTE +resources.clusters.*.spec.workload_type.clients compute.ClientsTypes REMOTE +resources.clusters.*.spec.workload_type.clients.jobs bool REMOTE +resources.clusters.*.spec.workload_type.clients.notebooks bool REMOTE +resources.clusters.*.ssh_public_keys []string ALL +resources.clusters.*.ssh_public_keys[*] string ALL +resources.clusters.*.start_time int64 REMOTE +resources.clusters.*.state compute.State REMOTE +resources.clusters.*.state_message string REMOTE +resources.clusters.*.terminated_time int64 REMOTE +resources.clusters.*.termination_reason *compute.TerminationReason REMOTE +resources.clusters.*.termination_reason.code compute.TerminationReasonCode REMOTE +resources.clusters.*.termination_reason.parameters map[string]string REMOTE +resources.clusters.*.termination_reason.parameters.* string REMOTE +resources.clusters.*.termination_reason.type compute.TerminationReasonType REMOTE +resources.clusters.*.total_initial_remote_disk_size int ALL +resources.clusters.*.url string INPUT +resources.clusters.*.use_ml_runtime bool ALL +resources.clusters.*.workload_type *compute.WorkloadType ALL +resources.clusters.*.workload_type.clients compute.ClientsTypes ALL +resources.clusters.*.workload_type.clients.jobs bool ALL +resources.clusters.*.workload_type.clients.notebooks bool ALL resources.database_catalogs.*.create_database_if_not_exists bool ALL 
resources.database_catalogs.*.database_instance_name string ALL resources.database_catalogs.*.database_name string ALL diff --git a/acceptance/bundle/resources/clusters/deploy/simple/out.test.toml b/acceptance/bundle/resources/clusters/deploy/simple/out.test.toml index 3cdb920b67..c3a1b55592 100644 --- a/acceptance/bundle/resources/clusters/deploy/simple/out.test.toml +++ b/acceptance/bundle/resources/clusters/deploy/simple/out.test.toml @@ -2,4 +2,4 @@ Local = false Cloud = true [EnvMatrix] - DATABRICKS_BUNDLE_ENGINE = ["terraform"] + DATABRICKS_BUNDLE_ENGINE = ["terraform", "direct-exp"] diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/databricks.yml.tmpl b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/databricks.yml.tmpl new file mode 100644 index 0000000000..f3d1cbc4a7 --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/databricks.yml.tmpl @@ -0,0 +1,13 @@ +bundle: + name: test-deploy-cluster-autoscale + +workspace: + root_path: ~/.bundle/$UNIQUE_NAME + +resources: + clusters: + test_cluster: + cluster_name: test-cluster-$UNIQUE_NAME + spark_version: $DEFAULT_SPARK_VERSION + node_type_id: $NODE_TYPE_ID + num_workers: 2 diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/hello_world.py b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/hello_world.py new file mode 100644 index 0000000000..f301245e24 --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/hello_world.py @@ -0,0 +1 @@ +print("Hello World!") diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.direct-exp.json b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.direct-exp.json new file mode 100644 index 0000000000..79da574bca --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.direct-exp.json @@ -0,0 +1,172 @@ +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "create", + "new_state": { + "config": { + "autotermination_minutes": 60, + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "num_workers": 2, + "spark_version": "13.3.x-snapshot-scala2.12" + } + } + } + } +} +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "update", + "new_state": { + "config": { + "autoscale": { + "max_workers": 4, + "min_workers": 2 + }, + "autotermination_minutes": 60, + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "spark_version": "13.3.x-snapshot-scala2.12" + } + }, + "remote_state": { + "autotermination_minutes": 60, + "cluster_id": "[CLUSTER_ID]", + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "num_workers": 2, + "spark_version": "13.3.x-snapshot-scala2.12" + }, + "changes": { + "local": { + "autoscale": { + "action": "update" + }, + "num_workers": { + "action": "update" + } + } + } + } + } +} +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "update", + "new_state": { + "config": { + "autoscale": { + "max_workers": 5, + "min_workers": 3 + }, + "autotermination_minutes": 60, + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "spark_version": "13.3.x-snapshot-scala2.12" + } + }, + "remote_state": { + "autoscale": { + "max_workers": 4, + "min_workers": 2 + }, + "autotermination_minutes": 60, + "cluster_id": "[CLUSTER_ID]", + "cluster_name": 
"test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "spark_version": "13.3.x-snapshot-scala2.12" + }, + "changes": { + "local": { + "autoscale.max_workers": { + "action": "update" + }, + "autoscale.min_workers": { + "action": "update" + } + } + } + } + } +} +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "resize", + "new_state": { + "config": { + "autoscale": { + "max_workers": 6, + "min_workers": 4 + }, + "autotermination_minutes": 60, + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "spark_version": "13.3.x-snapshot-scala2.12" + } + }, + "remote_state": { + "autoscale": { + "max_workers": 5, + "min_workers": 3 + }, + "autotermination_minutes": 60, + "cluster_id": "[CLUSTER_ID]", + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "spark_version": "13.3.x-snapshot-scala2.12", + "state": "RUNNING" + }, + "changes": { + "local": { + "autoscale.max_workers": { + "action": "resize" + }, + "autoscale.min_workers": { + "action": "resize" + } + } + } + } + } +} +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "resize", + "new_state": { + "config": { + "autotermination_minutes": 60, + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "num_workers": 3, + "spark_version": "13.3.x-snapshot-scala2.12" + } + }, + "remote_state": { + "autoscale": { + "max_workers": 6, + "min_workers": 4 + }, + "autotermination_minutes": 60, + "cluster_id": "[CLUSTER_ID]", + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "spark_version": "13.3.x-snapshot-scala2.12", + "state": "RUNNING" + }, + "changes": { + "local": { + "autoscale": { + "action": "resize" + }, + "num_workers": { + "action": "resize" + } + } + } + } + } +} diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.direct-exp.txt b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.direct-exp.txt new file mode 100644 index 0000000000..0c010688c2 --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.direct-exp.txt @@ -0,0 +1,15 @@ +create clusters.test_cluster + +Plan: 1 to add, 0 to change, 0 to delete, 0 unchanged +update clusters.test_cluster + +Plan: 0 to add, 1 to change, 0 to delete, 0 unchanged +update clusters.test_cluster + +Plan: 0 to add, 1 to change, 0 to delete, 0 unchanged +resize clusters.test_cluster + +Plan: 0 to add, 1 to change, 0 to delete, 0 unchanged +resize clusters.test_cluster + +Plan: 0 to add, 1 to change, 0 to delete, 0 unchanged diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.terraform.json b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.terraform.json new file mode 100644 index 0000000000..ede4bf5217 --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.terraform.json @@ -0,0 +1,35 @@ +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "create" + } + } +} +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "update" + } + } +} +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "update" + } + } +} +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "update" + } + } +} +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "update" + } + } +} diff --git 
a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.terraform.txt b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.terraform.txt new file mode 100644 index 0000000000..73b40dacde --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.terraform.txt @@ -0,0 +1,15 @@ +create clusters.test_cluster + +Plan: 1 to add, 0 to change, 0 to delete, 0 unchanged +update clusters.test_cluster + +Plan: 0 to add, 1 to change, 0 to delete, 0 unchanged +update clusters.test_cluster + +Plan: 0 to add, 1 to change, 0 to delete, 0 unchanged +update clusters.test_cluster + +Plan: 0 to add, 1 to change, 0 to delete, 0 unchanged +update clusters.test_cluster + +Plan: 0 to add, 1 to change, 0 to delete, 0 unchanged diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.test.toml b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.test.toml new file mode 100644 index 0000000000..e092fd5ed6 --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.test.toml @@ -0,0 +1,5 @@ +Local = true +Cloud = false + +[EnvMatrix] + DATABRICKS_BUNDLE_ENGINE = ["terraform", "direct-exp"] diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/output.txt b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/output.txt new file mode 100644 index 0000000000..2ae2661ba8 --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/output.txt @@ -0,0 +1,162 @@ + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +=== Cluster should exist with num_workers after bundle deployment: +{ + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "num_workers": 2, + "autoscale": null +} + +=== Adding autoscale section should call update API on stopped cluster + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) out.requests.txt +{ + "method": "POST", + "path": "/api/2.1/clusters/edit", + "body": { + "autoscale": { + "max_workers": 4, + "min_workers": 2 + }, + "autotermination_minutes": 60, + "cluster_id": "[CLUSTER_ID]", + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "spark_version": "13.3.x-snapshot-scala2.12" + } +} + +=== Cluster should have autoscale +{ + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "num_workers": null, + "autoscale": { + "max_workers": 4, + "min_workers": 2 + } +} + +=== Changing autoscale should call update API on stopped cluster + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! 
+ +>>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) out.requests.txt +{ + "method": "POST", + "path": "/api/2.1/clusters/edit", + "body": { + "autoscale": { + "max_workers": 5, + "min_workers": 3 + }, + "autotermination_minutes": 60, + "cluster_id": "[CLUSTER_ID]", + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "spark_version": "13.3.x-snapshot-scala2.12" + } +} + +=== Cluster should have new autoscale +{ + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "num_workers": null, + "autoscale": { + "max_workers": 5, + "min_workers": 3 + } +} + +=== Starting the cluster +{ + "autoscale": { + "max_workers":5, + "min_workers":3 + }, + "autotermination_minutes":60, + "cluster_id":"[CLUSTER_ID]", + "cluster_name":"test-cluster-[UNIQUE_NAME]", + "node_type_id":"[NODE_TYPE_ID]", + "spark_version":"13.3.x-snapshot-scala2.12", + "state":"RUNNING" +} + +=== Changing autoscale should call resize API on running cluster + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> jq select(.method == "POST" and (.path | contains("/clusters/resize"))) out.requests.txt +{ + "method": "POST", + "path": "/api/2.1/clusters/resize", + "body": { + "autoscale": { + "max_workers": 6, + "min_workers": 4 + }, + "cluster_id": "[CLUSTER_ID]" + } +} + +=== Cluster should have new autoscale +{ + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "num_workers": null, + "autoscale": { + "max_workers": 6, + "min_workers": 4 + } +} + +=== Removing autoscale section should call resize API + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> jq select(.method == "POST" and (.path | contains("/clusters/resize"))) out.requests.txt +{ + "method": "POST", + "path": "/api/2.1/clusters/resize", + "body": { + "cluster_id": "[CLUSTER_ID]", + "num_workers": 3 + } +} + +=== Cluster should have num_workers +{ + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "num_workers": 3, + "autoscale": null +} + +>>> [CLI] bundle destroy --auto-approve +The following resources will be deleted: + delete cluster test_cluster + +All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME] + +Deleting files... +Destroy complete! 
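The transcript above, which both engines are expected to reproduce, pins down the cluster update rules: configuration changes on a stopped cluster go through POST /api/2.1/clusters/edit, while pure sizing changes (num_workers or autoscale) on a RUNNING cluster go through POST /api/2.1/clusters/resize. A minimal Go sketch of that decision follows; the helper name and the exact field allow-list are illustrative assumptions, since the real classification is ClassifyChange on ResourceCluster (its structdiff and deployplan imports suggest so), which this excerpt truncates before showing.

package main

import (
	"fmt"
	"strings"

	"github.com/databricks/databricks-sdk-go/service/compute"
)

// canUseResizeAPI reports whether a set of changed cluster fields can be applied
// via clusters/resize rather than clusters/edit. Illustrative sketch only; the
// rule is inferred from the recorded requests and plan actions above.
func canUseResizeAPI(changedFields []string, remote *compute.ClusterDetails) bool {
	// Resize is only meaningful for a cluster that is currently running.
	if remote == nil || remote.State != compute.State("RUNNING") {
		return false
	}
	// Only worker-count and autoscale changes qualify; anything else
	// (spark_conf, node_type_id, ...) still needs clusters/edit.
	for _, f := range changedFields {
		if f != "num_workers" && f != "autoscale" && !strings.HasPrefix(f, "autoscale.") {
			return false
		}
	}
	return len(changedFields) > 0
}

func main() {
	running := &compute.ClusterDetails{State: compute.State("RUNNING")}
	fmt.Println(canUseResizeAPI([]string{"autoscale.max_workers"}, running)) // true  -> resize
	fmt.Println(canUseResizeAPI([]string{"spark_conf"}, running))            // false -> edit
}

For comparison, the terraform plan output (out.plan_.terraform.txt) reports every such change as a plain update, while the direct-exp plan surfaces the distinct resize action.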
diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/script b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/script new file mode 100755 index 0000000000..01203fca55 --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/script @@ -0,0 +1,69 @@ +envsubst < databricks.yml.tmpl > databricks.yml + +cleanup() { + trace $CLI bundle destroy --auto-approve + rm out.requests.txt +} +trap cleanup EXIT + +$CLI bundle plan > out.plan_.$DATABRICKS_BUNDLE_ENGINE.txt +$CLI bundle debug plan > out.plan_.$DATABRICKS_BUNDLE_ENGINE.json +trace $CLI bundle deploy + +title "Cluster should exist with num_workers after bundle deployment:\n" +CLUSTER_ID=$($CLI bundle summary -o json | jq -r '.resources.clusters.test_cluster.id') +echo "$CLUSTER_ID:CLUSTER_ID" >> ACC_REPLS +$CLI clusters get "${CLUSTER_ID}" | jq '{cluster_name,num_workers,autoscale}' + +title "Adding autoscale section should call update API on stopped cluster\n" +update_file.py databricks.yml " num_workers: 2" " autoscale: + min_workers: 2 + max_workers: 4" +$CLI bundle plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.txt +$CLI bundle debug plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.json +trace $CLI bundle deploy +trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit")))' out.requests.txt +rm out.requests.txt + +title "Cluster should have autoscale\n" +$CLI clusters get "${CLUSTER_ID}" | jq '{cluster_name,num_workers,autoscale}' + +title "Changing autoscale should call update API on stopped cluster\n" +update_file.py databricks.yml "min_workers: 2" "min_workers: 3" +update_file.py databricks.yml "max_workers: 4" "max_workers: 5" +$CLI bundle plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.txt +$CLI bundle debug plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.json +trace $CLI bundle deploy +trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit")))' out.requests.txt +rm out.requests.txt + +title "Cluster should have new autoscale\n" +$CLI clusters get "${CLUSTER_ID}" | jq '{cluster_name,num_workers,autoscale}' + +title "Starting the cluster\n" +$CLI clusters start "${CLUSTER_ID}" + +title "Changing autoscale should call resize API on running cluster\n" +update_file.py databricks.yml "min_workers: 3" "min_workers: 4" +update_file.py databricks.yml "max_workers: 5" "max_workers: 6" +$CLI bundle plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.txt +$CLI bundle debug plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.json +trace $CLI bundle deploy +trace jq 'select(.method == "POST" and (.path | contains("/clusters/resize")))' out.requests.txt +rm out.requests.txt + +title "Cluster should have new autoscale\n" +$CLI clusters get "${CLUSTER_ID}" | jq '{cluster_name,num_workers,autoscale}' + +title "Removing autoscale section should call resize API\n" +update_file.py databricks.yml " autoscale: + min_workers: 4 + max_workers: 6" " num_workers: 3" +$CLI bundle plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.txt +$CLI bundle debug plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.json +trace $CLI bundle deploy +trace jq 'select(.method == "POST" and (.path | contains("/clusters/resize")))' out.requests.txt +rm out.requests.txt + +title "Cluster should have num_workers\n" +$CLI clusters get "${CLUSTER_ID}" | jq '{cluster_name,num_workers,autoscale}' diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/test.toml b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/test.toml new file mode 100644 index 
0000000000..3aa4a19050 --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/test.toml @@ -0,0 +1,7 @@ +Local = true +Cloud = false +RecordRequests = true + +Ignore = [ + "databricks.yml", +] diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize/databricks.yml.tmpl b/acceptance/bundle/resources/clusters/deploy/update-and-resize/databricks.yml.tmpl new file mode 100644 index 0000000000..c32af2055f --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize/databricks.yml.tmpl @@ -0,0 +1,15 @@ +bundle: + name: test-deploy-cluster-simple + +workspace: + root_path: ~/.bundle/$UNIQUE_NAME + +resources: + clusters: + test_cluster: + cluster_name: test-cluster-$UNIQUE_NAME + spark_version: $DEFAULT_SPARK_VERSION + node_type_id: $NODE_TYPE_ID + num_workers: 2 + spark_conf: + "spark.executor.memory": "2g" diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize/hello_world.py b/acceptance/bundle/resources/clusters/deploy/update-and-resize/hello_world.py new file mode 100644 index 0000000000..f301245e24 --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize/hello_world.py @@ -0,0 +1 @@ +print("Hello World!") diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct-exp.json b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct-exp.json new file mode 100644 index 0000000000..612bbd6d0d --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct-exp.json @@ -0,0 +1,135 @@ +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "create", + "new_state": { + "config": { + "autotermination_minutes": 60, + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "num_workers": 2, + "spark_conf": { + "spark.executor.memory": "2g" + }, + "spark_version": "13.3.x-snapshot-scala2.12" + } + } + } + } +} +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "update", + "new_state": { + "config": { + "autotermination_minutes": 60, + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "num_workers": 3, + "spark_conf": { + "spark.executor.memory": "2g" + }, + "spark_version": "13.3.x-snapshot-scala2.12" + } + }, + "remote_state": { + "autotermination_minutes": 60, + "cluster_id": "[CLUSTER_ID]", + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "num_workers": 2, + "spark_conf": { + "spark.executor.memory": "2g" + }, + "spark_version": "13.3.x-snapshot-scala2.12" + }, + "changes": { + "local": { + "num_workers": { + "action": "update" + } + } + } + } + } +} +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "resize", + "new_state": { + "config": { + "autotermination_minutes": 60, + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "num_workers": 4, + "spark_conf": { + "spark.executor.memory": "2g" + }, + "spark_version": "13.3.x-snapshot-scala2.12" + } + }, + "remote_state": { + "autotermination_minutes": 60, + "cluster_id": "[CLUSTER_ID]", + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "num_workers": 3, + "spark_conf": { + "spark.executor.memory": "2g" + }, + "spark_version": "13.3.x-snapshot-scala2.12", + "state": "RUNNING" + }, + "changes": { + "local": { + "num_workers": { + "action": "resize" + } + } + } + } + } +} +{ + "plan": { + "resources.clusters.test_cluster": { + "action": 
"update", + "new_state": { + "config": { + "autotermination_minutes": 60, + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "num_workers": 5, + "spark_conf": { + "spark.executor.memory": "4g" + }, + "spark_version": "13.3.x-snapshot-scala2.12" + } + }, + "remote_state": { + "autotermination_minutes": 60, + "cluster_id": "[CLUSTER_ID]", + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "num_workers": 4, + "spark_conf": { + "spark.executor.memory": "2g" + }, + "spark_version": "13.3.x-snapshot-scala2.12", + "state": "RUNNING" + }, + "changes": { + "local": { + "num_workers": { + "action": "resize" + }, + "spark_conf['spark.executor.memory']": { + "action": "update" + } + } + } + } + } +} diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct-exp.txt b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct-exp.txt new file mode 100644 index 0000000000..70123004a2 --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct-exp.txt @@ -0,0 +1,12 @@ +create clusters.test_cluster + +Plan: 1 to add, 0 to change, 0 to delete, 0 unchanged +update clusters.test_cluster + +Plan: 0 to add, 1 to change, 0 to delete, 0 unchanged +resize clusters.test_cluster + +Plan: 0 to add, 1 to change, 0 to delete, 0 unchanged +update clusters.test_cluster + +Plan: 0 to add, 1 to change, 0 to delete, 0 unchanged diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.terraform.json b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.terraform.json new file mode 100644 index 0000000000..0b0dc4dd9a --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.terraform.json @@ -0,0 +1,28 @@ +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "create" + } + } +} +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "update" + } + } +} +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "update" + } + } +} +{ + "plan": { + "resources.clusters.test_cluster": { + "action": "update" + } + } +} diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.terraform.txt b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.terraform.txt new file mode 100644 index 0000000000..d51cd2abf0 --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.terraform.txt @@ -0,0 +1,12 @@ +create clusters.test_cluster + +Plan: 1 to add, 0 to change, 0 to delete, 0 unchanged +update clusters.test_cluster + +Plan: 0 to add, 1 to change, 0 to delete, 0 unchanged +update clusters.test_cluster + +Plan: 0 to add, 1 to change, 0 to delete, 0 unchanged +update clusters.test_cluster + +Plan: 0 to add, 1 to change, 0 to delete, 0 unchanged diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.test.toml b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.test.toml new file mode 100644 index 0000000000..e092fd5ed6 --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.test.toml @@ -0,0 +1,5 @@ +Local = true +Cloud = false + +[EnvMatrix] + DATABRICKS_BUNDLE_ENGINE = ["terraform", "direct-exp"] diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize/output.txt b/acceptance/bundle/resources/clusters/deploy/update-and-resize/output.txt new file mode 100644 index 0000000000..38a9f6434a --- /dev/null +++ 
b/acceptance/bundle/resources/clusters/deploy/update-and-resize/output.txt @@ -0,0 +1,124 @@ + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +=== Cluster should exist after bundle deployment: +{ + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "num_workers": 2 +} + +=== Changing num_workers should call update API on stopped cluster + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) out.requests.txt +{ + "method": "POST", + "path": "/api/2.1/clusters/edit", + "body": { + "autotermination_minutes": 60, + "cluster_id": "[CLUSTER_ID]", + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "num_workers": 3, + "spark_conf": { + "spark.executor.memory": "2g" + }, + "spark_version": "13.3.x-snapshot-scala2.12" + } +} + +=== Cluster should have new num_workers +{ + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "num_workers": 3 +} + +=== Starting the cluster +{ + "autotermination_minutes":60, + "cluster_id":"[CLUSTER_ID]", + "cluster_name":"test-cluster-[UNIQUE_NAME]", + "node_type_id":"[NODE_TYPE_ID]", + "num_workers":3, + "spark_conf": { + "spark.executor.memory":"2g" + }, + "spark_version":"13.3.x-snapshot-scala2.12", + "state":"RUNNING" +} + +=== Changing num_workers should call resize API on running cluster + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> jq select(.method == "POST" and (.path | contains("/clusters/resize"))) out.requests.txt +{ + "method": "POST", + "path": "/api/2.1/clusters/resize", + "body": { + "cluster_id": "[CLUSTER_ID]", + "num_workers": 4 + } +} + +=== Cluster should have new num_workers +{ + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "num_workers": 4 +} + +=== Changing num_workers and spark_conf should call update API + +>>> [CLI] bundle deploy +Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME]/files... +Deploying resources... +Updating deployment state... +Deployment complete! + +>>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) out.requests.txt +{ + "method": "POST", + "path": "/api/2.1/clusters/edit", + "body": { + "autotermination_minutes": 60, + "cluster_id": "[CLUSTER_ID]", + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "node_type_id": "[NODE_TYPE_ID]", + "num_workers": 5, + "spark_conf": { + "spark.executor.memory": "4g" + }, + "spark_version": "13.3.x-snapshot-scala2.12" + } +} + +=== Cluster should have new num_workers and spark_conf +{ + "cluster_name": "test-cluster-[UNIQUE_NAME]", + "num_workers": 5, + "spark_conf": { + "spark.executor.memory": "4g" + } +} + +>>> [CLI] bundle destroy --auto-approve +The following resources will be deleted: + delete cluster test_cluster + +All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/[UNIQUE_NAME] + +Deleting files... +Destroy complete! 
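As recorded above, clusters/resize bodies carry only cluster_id plus the sizing fields, whereas clusters/edit re-sends the full spec. The ResourceCluster implementation at the end of this diff is cut off before its DoResize method, so the following is only a plausible sketch of issuing such a request with the Go SDK; the waiter-style Clusters.Resize signature and the choice not to block on the wait handle are assumptions.

package sketch

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/compute"
)

// resizeCluster sends a clusters/resize call containing only the sizing fields,
// matching the request bodies captured in out.requests.txt above. Sketch only.
func resizeCluster(ctx context.Context, w *databricks.WorkspaceClient, id string, spec *compute.ClusterSpec) error {
	// Zero-valued fields are dropped from the JSON body (omitempty), so a
	// num_workers-only change omits autoscale and vice versa.
	_, err := w.Clusters.Resize(ctx, compute.ResizeCluster{
		ClusterId:  id,
		NumWorkers: spec.NumWorkers,
		Autoscale:  spec.Autoscale,
	})
	return err
}

Whatever its exact body, it has to satisfy the adapter contract added in bundle/direct/dresources/adapter.go below: DoResize(ctx, id, newState *compute.ClusterSpec) error, dispatched from DeploymentUnit.Deploy for the new ActionTypeResize action.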
diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize/script b/acceptance/bundle/resources/clusters/deploy/update-and-resize/script new file mode 100644 index 0000000000..b1795f6992 --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize/script @@ -0,0 +1,53 @@ +envsubst < databricks.yml.tmpl > databricks.yml + +cleanup() { + trace $CLI bundle destroy --auto-approve + rm out.requests.txt +} +trap cleanup EXIT + +$CLI bundle plan > out.plan_.$DATABRICKS_BUNDLE_ENGINE.txt +$CLI bundle debug plan > out.plan_.$DATABRICKS_BUNDLE_ENGINE.json +trace $CLI bundle deploy + +title "Cluster should exist after bundle deployment:\n" +CLUSTER_ID=$($CLI bundle summary -o json | jq -r '.resources.clusters.test_cluster.id') +echo "$CLUSTER_ID:CLUSTER_ID" >> ACC_REPLS +$CLI clusters get "${CLUSTER_ID}" | jq '{cluster_name,num_workers}' + +title "Changing num_workers should call update API on stopped cluster\n" +update_file.py databricks.yml "num_workers: 2" "num_workers: 3" +$CLI bundle plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.txt +$CLI bundle debug plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.json +trace $CLI bundle deploy +trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit")))' out.requests.txt +rm out.requests.txt + +title "Cluster should have new num_workers\n" +$CLI clusters get "${CLUSTER_ID}" | jq '{cluster_name,num_workers}' + +title "Starting the cluster\n" +$CLI clusters start "${CLUSTER_ID}" + +title "Changing num_workers should call resize API on running cluster\n" +update_file.py databricks.yml "num_workers: 3" "num_workers: 4" +$CLI bundle plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.txt +$CLI bundle debug plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.json +trace $CLI bundle deploy +trace jq 'select(.method == "POST" and (.path | contains("/clusters/resize")))' out.requests.txt +rm out.requests.txt + +title "Cluster should have new num_workers\n" +$CLI clusters get "${CLUSTER_ID}" | jq '{cluster_name,num_workers}' + +title "Changing num_workers and spark_conf should call update API\n" +update_file.py databricks.yml "num_workers: 4" "num_workers: 5" +update_file.py databricks.yml '"spark.executor.memory": "2g"' '"spark.executor.memory": "4g"' +$CLI bundle plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.txt +$CLI bundle debug plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.json +trace $CLI bundle deploy +trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit")))' out.requests.txt +rm out.requests.txt + +title "Cluster should have new num_workers and spark_conf\n" +$CLI clusters get "${CLUSTER_ID}" | jq '{cluster_name,num_workers,spark_conf}' diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize/test.toml b/acceptance/bundle/resources/clusters/deploy/update-and-resize/test.toml new file mode 100644 index 0000000000..3aa4a19050 --- /dev/null +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize/test.toml @@ -0,0 +1,7 @@ +Local = true +Cloud = false +RecordRequests = true + +Ignore = [ + "databricks.yml", +] diff --git a/acceptance/bundle/resources/clusters/run/spark_python_task/out.test.toml b/acceptance/bundle/resources/clusters/run/spark_python_task/out.test.toml index 1ae7a3995d..601e190219 100644 --- a/acceptance/bundle/resources/clusters/run/spark_python_task/out.test.toml +++ b/acceptance/bundle/resources/clusters/run/spark_python_task/out.test.toml @@ -3,4 +3,4 @@ Cloud = true CloudSlow = true [EnvMatrix] - DATABRICKS_BUNDLE_ENGINE = ["terraform"] + DATABRICKS_BUNDLE_ENGINE = 
["terraform", "direct-exp"] diff --git a/acceptance/bundle/resources/clusters/test.toml b/acceptance/bundle/resources/clusters/test.toml deleted file mode 100644 index e1ff579ead..0000000000 --- a/acceptance/bundle/resources/clusters/test.toml +++ /dev/null @@ -1 +0,0 @@ -EnvMatrix.DATABRICKS_BUNDLE_ENGINE = ["terraform"] # clusters are not implemented yet diff --git a/acceptance/bundle/validate/empty_resources/empty_dict/output.txt b/acceptance/bundle/validate/empty_resources/empty_dict/output.txt index f7b3a13425..d73aff0d1b 100644 --- a/acceptance/bundle/validate/empty_resources/empty_dict/output.txt +++ b/acceptance/bundle/validate/empty_resources/empty_dict/output.txt @@ -129,7 +129,9 @@ Warning: required field "schema_name" is not set === resources.clusters.rname === { "clusters": { - "rname": {} + "rname": { + "autotermination_minutes": 60 + } } } diff --git a/acceptance/bundle/validate/empty_resources/with_grants/output.txt b/acceptance/bundle/validate/empty_resources/with_grants/output.txt index b14e5b8057..09cef7a319 100644 --- a/acceptance/bundle/validate/empty_resources/with_grants/output.txt +++ b/acceptance/bundle/validate/empty_resources/with_grants/output.txt @@ -158,7 +158,9 @@ Warning: unknown field: grants { "clusters": { - "rname": {} + "rname": { + "autotermination_minutes": 60 + } } } diff --git a/acceptance/bundle/validate/empty_resources/with_permissions/output.txt b/acceptance/bundle/validate/empty_resources/with_permissions/output.txt index 7d6a912f5e..990fcea778 100644 --- a/acceptance/bundle/validate/empty_resources/with_permissions/output.txt +++ b/acceptance/bundle/validate/empty_resources/with_permissions/output.txt @@ -146,6 +146,7 @@ Warning: required field "schema_name" is not set { "clusters": { "rname": { + "autotermination_minutes": 60, "permissions": [] } } diff --git a/bundle/config/mutator/resourcemutator/resource_mutator.go b/bundle/config/mutator/resourcemutator/resource_mutator.go index 98cbb8cb25..cf82177cae 100644 --- a/bundle/config/mutator/resourcemutator/resource_mutator.go +++ b/bundle/config/mutator/resourcemutator/resource_mutator.go @@ -92,6 +92,10 @@ func applyInitializeMutators(ctx context.Context, b *bundle.Bundle) { // Apps: {"resources.apps.*.description", ""}, + + // Clusters (same as terraform) + // https://github.com/databricks/terraform-provider-databricks/blob/v1.75.0/clusters/resource_cluster.go#L315 + {"resources.clusters.*.autotermination_minutes", 60}, } for _, defaultDef := range defaults { diff --git a/bundle/direct/apply.go b/bundle/direct/apply.go index e3dc5fcb84..c647add2f3 100644 --- a/bundle/direct/apply.go +++ b/bundle/direct/apply.go @@ -54,6 +54,8 @@ func (d *DeploymentUnit) Deploy(ctx context.Context, db *dstate.DeploymentState, return d.Update(ctx, db, oldID, newState) case deployplan.ActionTypeUpdateWithID: return d.UpdateWithID(ctx, db, oldID, newState) + case deployplan.ActionTypeResize: + return d.Resize(ctx, db, oldID, newState) default: return fmt.Errorf("internal error: unexpected actionType: %#v", actionType) } @@ -185,6 +187,20 @@ func (d *DeploymentUnit) Delete(ctx context.Context, db *dstate.DeploymentState, return nil } +func (d *DeploymentUnit) Resize(ctx context.Context, db *dstate.DeploymentState, id string, newState any) error { + err := d.Adapter.DoResize(ctx, id, newState) + if err != nil { + return fmt.Errorf("resizing id=%s: %w", id, err) + } + + err = db.SaveState(d.ResourceKey, id, newState) + if err != nil { + return fmt.Errorf("saving state id=%s: %w", id, err) + } + + return nil +} + 
diff --git a/bundle/direct/bundle_plan.go b/bundle/direct/bundle_plan.go
index 4c15972d23..489b14d02d 100644
--- a/bundle/direct/bundle_plan.go
+++ b/bundle/direct/bundle_plan.go
@@ -149,8 +149,17 @@ func (b *DeploymentBundle) CalculatePlan(ctx context.Context, client *databricks
             return false
         }
 
-        localAction, localChangeMap := convertChangesToTriggersMap(adapter, localDiff)
+        remoteState, err := adapter.DoRefresh(ctx, dbentry.ID)
+        if err != nil {
+            if isResourceGone(err) {
+                remoteState = nil
+            } else {
+                logdiag.LogError(ctx, fmt.Errorf("%s: failed to read id=%q: %w", errorPrefix, dbentry.ID, err))
+                return false
+            }
+        }
+        localAction, localChangeMap := convertChangesToTriggersMap(ctx, adapter, localDiff, remoteState)
         if localAction == deployplan.ActionTypeRecreate {
             entry.Action = localAction.String()
             if len(localChangeMap) > 0 {
@@ -161,16 +170,6 @@ func (b *DeploymentBundle) CalculatePlan(ctx context.Context, client *databricks
             return true
         }
 
-        remoteState, err := adapter.DoRefresh(ctx, dbentry.ID)
-        if err != nil {
-            if isResourceGone(err) {
-                remoteState = nil
-            } else {
-                logdiag.LogError(ctx, fmt.Errorf("%s: failed to read id=%q: %w (localAction=%q)", errorPrefix, dbentry.ID, err, localAction.String()))
-                return false
-            }
-        }
-
         // We have a choice whether to include remoteState or remoteStateComparable from below.
         // Including remoteState because in the near future remoteState is expected to become a superset struct of remoteStateComparable
         entry.RemoteState = remoteState
@@ -221,12 +220,16 @@ func (b *DeploymentBundle) CalculatePlan(ctx context.Context, client *databricks
     return plan, nil
 }
 
-func convertChangesToTriggersMap(adapter *dresources.Adapter, diff []structdiff.Change) (deployplan.ActionType, map[string]deployplan.Trigger) {
+func convertChangesToTriggersMap(ctx context.Context, adapter *dresources.Adapter, diff []structdiff.Change, remoteState any) (deployplan.ActionType, map[string]deployplan.Trigger) {
     action := deployplan.ActionTypeSkip
     var m map[string]deployplan.Trigger
 
     for _, ch := range diff {
-        fieldAction := adapter.ClassifyByTriggers(ch)
+        fieldAction, err := adapter.ClassifyChange(ch, remoteState)
+        if err != nil {
+            logdiag.LogError(ctx, fmt.Errorf("internal error: failed to classify change: %w", err))
+            continue
+        }
         if fieldAction > action {
             action = fieldAction
         }
@@ -256,10 +259,9 @@ func interpretOldStateVsRemoteState(ctx context.Context, adapter *dresources.Ada
         }
         fieldAction, err := adapter.ClassifyChange(ch, remoteState)
         if err != nil {
-            logdiag.LogError(ctx, fmt.Errorf("internal error: failed to classify changes: %w", err))
+            logdiag.LogError(ctx, fmt.Errorf("internal error: failed to classify change: %w", err))
             continue
         }
-
         if fieldAction > action {
             action = fieldAction
         }
diff --git a/bundle/direct/dresources/adapter.go b/bundle/direct/dresources/adapter.go
index 2b9c0a0cfb..465eb51089 100644
--- a/bundle/direct/dresources/adapter.go
+++ b/bundle/direct/dresources/adapter.go
@@ -66,6 +66,10 @@ type IResourceNoRefresh interface {
     // Example: func (r *ResourceVolume) DoUpdateWithID(ctx, id string, newState *catalog.CreateVolumeRequestContent) (string, error)
     DoUpdateWithID(ctx context.Context, id string, newState any) (string, error)
 
+    // [Optional] DoResize resizes the resource. Currently only supported by clusters.
+    // Example: func (r *ResourceCluster) DoResize(ctx context.Context, id string, newState *compute.ClusterSpec) error
+    DoResize(ctx context.Context, id string, newState any) error
+
     // [Optional] WaitAfterCreate waits for the resource to become ready after creation.
     // TODO: wait status should be persisted in the state.
     WaitAfterCreate(ctx context.Context, newState any) error
@@ -115,6 +119,7 @@ type Adapter struct {
     waitAfterCreate *calladapt.BoundCaller
     waitAfterUpdate *calladapt.BoundCaller
     classifyChange  *calladapt.BoundCaller
+    doResize        *calladapt.BoundCaller
 
     fieldTriggers map[string]deployplan.ActionType
 }
@@ -140,6 +145,7 @@ func NewAdapter(typedNil any, client *databricks.WorkspaceClient) (*Adapter, err
         doCreate:        nil,
         doUpdate:        nil,
         doUpdateWithID:  nil,
+        doResize:        nil,
         waitAfterCreate: nil,
         waitAfterUpdate: nil,
         classifyChange:  nil,
@@ -230,6 +236,16 @@ func (a *Adapter) initMethods(resource any) error {
         return err
     }
 
+    a.classifyChange, err = calladapt.PrepareCall(resource, calladapt.TypeOf[IResourceNoRefresh](), "ClassifyChange")
+    if err != nil {
+        return err
+    }
+
+    a.doResize, err = calladapt.PrepareCall(resource, calladapt.TypeOf[IResourceNoRefresh](), "DoResize")
+    if err != nil {
+        return err
+    }
+
     return nil
 }
@@ -290,6 +306,10 @@ func (a *Adapter) validate() error {
         validations = append(validations, "DoUpdate remoteState return", a.doUpdate.OutTypes[0], remoteType)
     }
 
+    if a.doResize != nil {
+        validations = append(validations, "DoResize newState", a.doResize.InTypes[2], stateType)
+    }
+
     if a.doUpdateWithID != nil {
         validations = append(validations, "DoUpdateWithID newState", a.doUpdateWithID.InTypes[2], stateType)
         if len(a.doUpdateWithID.OutTypes) == 3 {
@@ -312,7 +332,7 @@ func (a *Adapter) validate() error {
     }
 
     if a.classifyChange != nil {
-        validations = append(validations, "ClassifyChange changes", a.classifyChange.InTypes[1], remoteType)
+        validations = append(validations, "ClassifyChange remoteState", a.classifyChange.InTypes[1], remoteType)
     }
 
     err = validateTypes(validations...)
@@ -456,6 +476,15 @@ func (a *Adapter) DoUpdateWithID(ctx context.Context, oldID string, newState any
     }
 }
 
+func (a *Adapter) DoResize(ctx context.Context, id string, newState any) error {
+    if a.doResize == nil {
+        return errors.New("internal error: DoResize not found")
+    }
+
+    _, err := a.doResize.Call(ctx, id, newState)
+    return err
+}
+
 // ClassifyByTriggers classifies a single using FieldTriggers.
 // Defaults to ActionTypeUpdate.
 func (a *Adapter) ClassifyByTriggers(change structdiff.Change) deployplan.ActionType {
@@ -522,6 +551,10 @@ func (a *Adapter) ClassifyChange(change structdiff.Change, remoteState any) (dep
     }
 
     actionType := outs[0].(deployplan.ActionType)
+    // If the resource reports ActionTypeUnset, fall back to the static FieldTriggers classification.
+    if actionType == deployplan.ActionTypeUnset {
+        return a.ClassifyByTriggers(change), nil
+    }
     return actionType, nil
 }
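Because the adapter discovers these hooks by name via calladapt, a resource opts into the resize path simply by declaring typed methods of the documented shape. A rough sketch of a hypothetical resource (names are illustrative, not part of this change) showing both optional hooks and the ActionTypeUnset fallback wired up above:

package dresources

import (
    "context"

    "github.com/databricks/cli/bundle/deployplan"
    "github.com/databricks/cli/libs/structs/structdiff"
    "github.com/databricks/databricks-sdk-go/service/compute"
)

// ResourceExample is a hypothetical resource used only to illustrate the optional hooks.
type ResourceExample struct{}

// DoResize follows the IResourceNoRefresh shape; the adapter calls it when a change
// is classified as deployplan.ActionTypeResize.
func (r *ResourceExample) DoResize(ctx context.Context, id string, newState *compute.ClusterSpec) error {
    // Issue the narrower "resize" API call here instead of a full update.
    return nil
}

// ClassifyChange may defer to the static FieldTriggers table by returning
// deployplan.ActionTypeUnset, which Adapter.ClassifyChange now treats as a fallback.
func (r *ResourceExample) ClassifyChange(change structdiff.Change, remoteState *compute.ClusterDetails) (deployplan.ActionType, error) {
    if remoteState == nil {
        return deployplan.ActionTypeUnset, nil
    }
    return deployplan.ActionTypeUpdate, nil
}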
diff --git a/bundle/direct/dresources/all.go b/bundle/direct/dresources/all.go
index dfb79a6343..3e73cf420a 100644
--- a/bundle/direct/dresources/all.go
+++ b/bundle/direct/dresources/all.go
@@ -19,6 +19,7 @@ var SupportedResources = map[string]any{
     "database_catalogs":      (*ResourceDatabaseCatalog)(nil),
     "synced_database_tables": (*ResourceSyncedDatabaseTable)(nil),
     "alerts":                 (*ResourceAlert)(nil),
+    "clusters":               (*ResourceCluster)(nil),
     "registered_models":      (*ResourceRegisteredModel)(nil),
 }
diff --git a/bundle/direct/dresources/cluster.go b/bundle/direct/dresources/cluster.go
new file mode 100644
index 0000000000..a3854ddb94
--- /dev/null
+++ b/bundle/direct/dresources/cluster.go
@@ -0,0 +1,194 @@
+package dresources
+
+import (
+    "context"
+    "strings"
+
+    "github.com/databricks/cli/bundle/config/resources"
+    "github.com/databricks/cli/bundle/deployplan"
+    "github.com/databricks/cli/libs/structs/structdiff"
+    "github.com/databricks/databricks-sdk-go"
+    "github.com/databricks/databricks-sdk-go/service/compute"
+)
+
+type ResourceCluster struct {
+    client *databricks.WorkspaceClient
+}
+
+func (r *ResourceCluster) New(client *databricks.WorkspaceClient) any {
+    return &ResourceCluster{
+        client: client,
+    }
+}
+
+func (r *ResourceCluster) PrepareState(input *resources.Cluster) *compute.ClusterSpec {
+    return &input.ClusterSpec
+}
+
+func (r *ResourceCluster) RemapState(input *compute.ClusterDetails) *compute.ClusterSpec {
+    spec := &compute.ClusterSpec{
+        ApplyPolicyDefaultValues:   false,
+        Autoscale:                  input.Autoscale,
+        AutoterminationMinutes:     input.AutoterminationMinutes,
+        AwsAttributes:              input.AwsAttributes,
+        AzureAttributes:            input.AzureAttributes,
+        ClusterLogConf:             input.ClusterLogConf,
+        ClusterName:                input.ClusterName,
+        CustomTags:                 input.CustomTags,
+        DataSecurityMode:           input.DataSecurityMode,
+        DockerImage:                input.DockerImage,
+        DriverInstancePoolId:       input.DriverInstancePoolId,
+        DriverNodeTypeId:           input.DriverNodeTypeId,
+        EnableElasticDisk:          input.EnableElasticDisk,
+        EnableLocalDiskEncryption:  input.EnableLocalDiskEncryption,
+        GcpAttributes:              input.GcpAttributes,
+        InitScripts:                input.InitScripts,
+        InstancePoolId:             input.InstancePoolId,
+        IsSingleNode:               input.IsSingleNode,
+        Kind:                       input.Kind,
+        NodeTypeId:                 input.NodeTypeId,
+        NumWorkers:                 input.NumWorkers,
+        PolicyId:                   input.PolicyId,
+        RemoteDiskThroughput:       input.RemoteDiskThroughput,
+        RuntimeEngine:              input.RuntimeEngine,
+        SingleUserName:             input.SingleUserName,
+        SparkConf:                  input.SparkConf,
+        SparkEnvVars:               input.SparkEnvVars,
+        SparkVersion:               input.SparkVersion,
+        SshPublicKeys:              input.SshPublicKeys,
+        TotalInitialRemoteDiskSize: input.TotalInitialRemoteDiskSize,
+        UseMlRuntime:               input.UseMlRuntime,
+        WorkloadType:               input.WorkloadType,
+        ForceSendFields:            filterFields[compute.ClusterSpec](input.ForceSendFields),
+    }
+    if input.Spec != nil {
+        spec.ApplyPolicyDefaultValues = input.Spec.ApplyPolicyDefaultValues
+    }
+    return spec
+}
+
+func (r *ResourceCluster) DoRefresh(ctx context.Context, id string) (*compute.ClusterDetails, error) {
+    return r.client.Clusters.GetByClusterId(ctx, id)
+}
+
+func (r *ResourceCluster) DoCreate(ctx context.Context, config *compute.ClusterSpec) (string, error) {
+    wait, err := r.client.Clusters.Create(ctx, makeCreateCluster(config))
+    if err != nil {
+        return "", err
+    }
+    return wait.ClusterId, nil
+}
+
+func (r *ResourceCluster) DoUpdate(ctx context.Context, id string, config *compute.ClusterSpec) error {
+    _, err := r.client.Clusters.Edit(ctx, makeEditCluster(id, config))
+    if err != nil {
+        return err
+    }
+    return nil
+}
+
+func (r *ResourceCluster) DoResize(ctx context.Context, id string, config *compute.ClusterSpec) error {
+    _, err := r.client.Clusters.Resize(ctx, compute.ResizeCluster{
+        ClusterId:       id,
+        NumWorkers:      config.NumWorkers,
+        Autoscale:       config.Autoscale,
+        ForceSendFields: filterFields[compute.ResizeCluster](config.ForceSendFields),
+    })
+    return err
+}
+
+func (r *ResourceCluster) DoDelete(ctx context.Context, id string) error {
+    return r.client.Clusters.PermanentDeleteByClusterId(ctx, id)
+}
+
+func (r *ResourceCluster) ClassifyChange(change structdiff.Change, remoteState *compute.ClusterDetails) (deployplan.ActionType, error) {
+    // Always update if the cluster is not running.
+    if remoteState.State != compute.StateRunning {
+        return deployplan.ActionTypeUpdate, nil
+    }
+
+    changedPath := change.Path.String()
+    if changedPath == "num_workers" || strings.HasPrefix(changedPath, "autoscale") {
+        return deployplan.ActionTypeResize, nil
+    }
+
+    return deployplan.ActionTypeUpdate, nil
+}
+
+func makeCreateCluster(config *compute.ClusterSpec) compute.CreateCluster {
+    return compute.CreateCluster{
+        ApplyPolicyDefaultValues:   config.ApplyPolicyDefaultValues,
+        Autoscale:                  config.Autoscale,
+        AutoterminationMinutes:     config.AutoterminationMinutes,
+        AwsAttributes:              config.AwsAttributes,
+        AzureAttributes:            config.AzureAttributes,
+        ClusterLogConf:             config.ClusterLogConf,
+        ClusterName:                config.ClusterName,
+        CloneFrom:                  nil, // Not supported by DABs
+        CustomTags:                 config.CustomTags,
+        DataSecurityMode:           config.DataSecurityMode,
+        DockerImage:                config.DockerImage,
+        DriverInstancePoolId:       config.DriverInstancePoolId,
+        DriverNodeTypeId:           config.DriverNodeTypeId,
+        EnableElasticDisk:          config.EnableElasticDisk,
+        EnableLocalDiskEncryption:  config.EnableLocalDiskEncryption,
+        GcpAttributes:              config.GcpAttributes,
+        InitScripts:                config.InitScripts,
+        InstancePoolId:             config.InstancePoolId,
+        IsSingleNode:               config.IsSingleNode,
+        Kind:                       config.Kind,
+        NodeTypeId:                 config.NodeTypeId,
+        NumWorkers:                 config.NumWorkers,
+        PolicyId:                   config.PolicyId,
+        RemoteDiskThroughput:       config.RemoteDiskThroughput,
+        RuntimeEngine:              config.RuntimeEngine,
+        SingleUserName:             config.SingleUserName,
+        SparkConf:                  config.SparkConf,
+        SparkEnvVars:               config.SparkEnvVars,
+        SparkVersion:               config.SparkVersion,
+        SshPublicKeys:              config.SshPublicKeys,
+        TotalInitialRemoteDiskSize: config.TotalInitialRemoteDiskSize,
+        UseMlRuntime:               config.UseMlRuntime,
+        WorkloadType:               config.WorkloadType,
+        ForceSendFields:            filterFields[compute.CreateCluster](config.ForceSendFields),
+    }
+}
+
+func makeEditCluster(id string, config *compute.ClusterSpec) compute.EditCluster {
+    return compute.EditCluster{
+        ClusterId:                  id,
+        ApplyPolicyDefaultValues:   config.ApplyPolicyDefaultValues,
+        Autoscale:                  config.Autoscale,
+        AutoterminationMinutes:     config.AutoterminationMinutes,
+        AwsAttributes:              config.AwsAttributes,
+        AzureAttributes:            config.AzureAttributes,
+        ClusterLogConf:             config.ClusterLogConf,
+        ClusterName:                config.ClusterName,
+        CustomTags:                 config.CustomTags,
+        DataSecurityMode:           config.DataSecurityMode,
+        DockerImage:                config.DockerImage,
+        DriverInstancePoolId:       config.DriverInstancePoolId,
+        DriverNodeTypeId:           config.DriverNodeTypeId,
+        EnableElasticDisk:          config.EnableElasticDisk,
+        EnableLocalDiskEncryption:  config.EnableLocalDiskEncryption,
+        GcpAttributes:              config.GcpAttributes,
+        InitScripts:                config.InitScripts,
+        InstancePoolId:             config.InstancePoolId,
+        IsSingleNode:               config.IsSingleNode,
+        Kind:                       config.Kind,
+        NodeTypeId:                 config.NodeTypeId,
+        NumWorkers:                 config.NumWorkers,
+        PolicyId:                   config.PolicyId,
+        RemoteDiskThroughput:       config.RemoteDiskThroughput,
+        RuntimeEngine:              config.RuntimeEngine,
+        SingleUserName:             config.SingleUserName,
+        SparkConf:                  config.SparkConf,
+        SparkEnvVars:               config.SparkEnvVars,
+        SparkVersion:               config.SparkVersion,
+        SshPublicKeys:              config.SshPublicKeys,
+        TotalInitialRemoteDiskSize: config.TotalInitialRemoteDiskSize,
+        UseMlRuntime:               config.UseMlRuntime,
+        WorkloadType:               config.WorkloadType,
+        ForceSendFields:            filterFields[compute.EditCluster](config.ForceSendFields),
+    }
+}
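DoResize above maps one-to-one onto the Clusters Resize API, which only accepts the worker count or autoscale range; everything else goes through Edit. A standalone sketch of the equivalent direct SDK call (the cluster ID and size are placeholders; in the direct backend the ID comes from deployment state):

package main

import (
    "context"

    "github.com/databricks/databricks-sdk-go"
    "github.com/databricks/databricks-sdk-go/service/compute"
)

func main() {
    ctx := context.Background()
    w := databricks.Must(databricks.NewWorkspaceClient())

    // Placeholder cluster ID for illustration only.
    _, err := w.Clusters.Resize(ctx, compute.ResizeCluster{
        ClusterId:  "0123-456789-abcdefgh",
        NumWorkers: 4,
    })
    if err != nil {
        panic(err)
    }
}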
diff --git a/cmd/bundle/plan.go b/cmd/bundle/plan.go
index 5309909583..e3c949e885 100644
--- a/cmd/bundle/plan.go
+++ b/cmd/bundle/plan.go
@@ -77,11 +77,11 @@ It is useful for previewing changes before running 'bundle deploy'.`,
             switch change.ActionType {
             case deployplan.ActionTypeCreate:
                 createCount++
-            case deployplan.ActionTypeUpdate, deployplan.ActionTypeUpdateWithID:
+            case deployplan.ActionTypeUpdate, deployplan.ActionTypeUpdateWithID, deployplan.ActionTypeResize:
                 updateCount++
             case deployplan.ActionTypeDelete:
                 deleteCount++
-            case deployplan.ActionTypeRecreate, deployplan.ActionTypeResize:
+            case deployplan.ActionTypeRecreate:
                 // A recreate counts as both a delete and a create
                 deleteCount++
                 createCount++
diff --git a/libs/testserver/clusters.go b/libs/testserver/clusters.go
new file mode 100644
index 0000000000..e2caa1603e
--- /dev/null
+++ b/libs/testserver/clusters.go
@@ -0,0 +1,120 @@
+package testserver
+
+import (
+    "encoding/json"
+    "fmt"
+
+    "github.com/databricks/databricks-sdk-go/service/compute"
+    "github.com/google/uuid"
+)
+
+func (s *FakeWorkspace) ClustersCreate(req Request) any {
+    var request compute.ClusterDetails
+    if err := json.Unmarshal(req.Body, &request); err != nil {
+        return Response{
+            StatusCode: 400,
+            Body:       fmt.Sprintf("request parsing error: %s", err),
+        }
+    }
+
+    defer s.LockUnlock()()
+
+    clusterId := uuid.New().String()
+    request.ClusterId = clusterId
+    s.Clusters[clusterId] = request
+
+    return Response{
+        Body: compute.ClusterDetails{
+            ClusterId: clusterId,
+        },
+    }
+}
+
+func (s *FakeWorkspace) ClustersResize(req Request) any {
+    var request compute.ResizeCluster
+    if err := json.Unmarshal(req.Body, &request); err != nil {
+        return Response{
+            StatusCode: 400,
+            Body:       fmt.Sprintf("request parsing error: %s", err),
+        }
+    }
+
+    defer s.LockUnlock()()
+
+    cluster, ok := s.Clusters[request.ClusterId]
+    if !ok {
+        return Response{StatusCode: 404}
+    }
+
+    cluster.NumWorkers = request.NumWorkers
+    cluster.Autoscale = request.Autoscale
+    s.Clusters[request.ClusterId] = cluster
+
+    return Response{}
+}
+
+func (s *FakeWorkspace) ClustersEdit(req Request) any {
+    var request compute.ClusterDetails
+    if err := json.Unmarshal(req.Body, &request); err != nil {
+        return Response{
+            StatusCode: 400,
+            Body:       fmt.Sprintf("request parsing error: %s", err),
+        }
+    }
+
+    defer s.LockUnlock()()
+
+    _, ok := s.Clusters[request.ClusterId]
+    if !ok {
+        return Response{StatusCode: 404}
+    }
+
+    s.Clusters[request.ClusterId] = request
+    return Response{}
+}
+
+func (s *FakeWorkspace) ClustersGet(req Request, clusterId string) any {
+    defer s.LockUnlock()()
+
+    cluster, ok := s.Clusters[clusterId]
+    if !ok {
+        return Response{StatusCode: 404}
+    }
+
+    return Response{
+        Body: cluster,
+    }
+}
+
+func (s *FakeWorkspace) ClustersStart(req Request) any {
+    var request compute.StartCluster
+    if err := json.Unmarshal(req.Body, &request); err != nil {
+        return Response{
+            StatusCode: 400,
+            Body:       fmt.Sprintf("request parsing error: %s", err),
+        }
+    }
+
+    defer s.LockUnlock()()
+
+    cluster, ok := s.Clusters[request.ClusterId]
+    if !ok {
+        return Response{StatusCode: 404}
+    }
+
+    cluster.State = compute.StateRunning
+    s.Clusters[request.ClusterId] = cluster
+
+    return Response{}
+}
+
+func (s *FakeWorkspace) ClustersPermanentDelete(req Request) any {
+    var request compute.PermanentDeleteCluster
+    if err := json.Unmarshal(req.Body, &request); err != nil {
+        return Response{
+            StatusCode: 400,
+            Body:       fmt.Sprintf("request parsing error: %s", err),
+        }
+    }
+
+    defer s.LockUnlock()()
+
+    delete(s.Clusters, request.ClusterId)
+    return Response{}
+}
diff --git a/libs/testserver/fake_workspace.go b/libs/testserver/fake_workspace.go
index 09d8e3135e..d2ac6dda27 100644
--- a/libs/testserver/fake_workspace.go
+++ b/libs/testserver/fake_workspace.go
@@ -10,6 +10,7 @@ import (
     "sync"
     "time"
 
+    "github.com/databricks/databricks-sdk-go/service/compute"
     "github.com/databricks/databricks-sdk-go/service/database"
 
     "github.com/databricks/databricks-sdk-go/service/apps"
@@ -92,6 +93,7 @@ type FakeWorkspace struct {
     Alerts              map[string]sql.AlertV2
     Experiments         map[string]ml.GetExperimentResponse
     ModelRegistryModels map[string]ml.Model
+    Clusters            map[string]compute.ClusterDetails
 
     Catalogs         map[string]catalog.CatalogInfo
     RegisteredModels map[string]catalog.RegisteredModelInfo
@@ -200,6 +202,7 @@ func NewFakeWorkspace(url, token string) *FakeWorkspace {
         Alerts:              map[string]sql.AlertV2{},
         Experiments:         map[string]ml.GetExperimentResponse{},
         ModelRegistryModels: map[string]ml.Model{},
+        Clusters:            map[string]compute.ClusterDetails{},
     }
 }
diff --git a/libs/testserver/handlers.go b/libs/testserver/handlers.go
index 286c6ecb6f..4fa2cfc307 100644
--- a/libs/testserver/handlers.go
+++ b/libs/testserver/handlers.go
@@ -528,6 +528,32 @@ func AddDefaultHandlers(server *Server) {
         return MapDelete(req.Workspace, req.Workspace.SyncedDatabaseTables, req.Vars["name"])
     })
 
+    // Clusters:
+    server.Handle("POST", "/api/2.1/clusters/resize", func(req Request) any {
+        return req.Workspace.ClustersResize(req)
+    })
+
+    server.Handle("POST", "/api/2.1/clusters/edit", func(req Request) any {
+        return req.Workspace.ClustersEdit(req)
+    })
+
+    server.Handle("GET", "/api/2.1/clusters/get", func(req Request) any {
+        clusterId := req.URL.Query().Get("cluster_id")
+        return req.Workspace.ClustersGet(req, clusterId)
+    })
+
+    server.Handle("POST", "/api/2.1/clusters/create", func(req Request) any {
+        return req.Workspace.ClustersCreate(req)
+    })
+
+    server.Handle("POST", "/api/2.1/clusters/start", func(req Request) any {
+        return req.Workspace.ClustersStart(req)
+    })
+
+    server.Handle("POST", "/api/2.1/clusters/permanent-delete", func(req Request) any {
+        return req.Workspace.ClustersPermanentDelete(req)
+    })
+
     // MLflow Experiments:
     server.Handle("GET", "/api/2.0/mlflow/experiments/get", func(req Request) any {
         experimentId := req.URL.Query().Get("experiment_id")
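A sketch of how the new fake cluster endpoints could be exercised from a Go test, assuming a helper that starts the acceptance test server (with AddDefaultHandlers registered) and returns its base URL; startTestServer below is hypothetical, while the SDK client and compute types are the same ones used in this change:

package testserver_test

import (
    "context"
    "testing"

    "github.com/databricks/databricks-sdk-go"
    "github.com/databricks/databricks-sdk-go/service/compute"
)

func TestFakeClusterCreateResizeGet(t *testing.T) {
    ctx := context.Background()
    url := startTestServer(t) // hypothetical helper: boots the test server and returns its URL

    w, err := databricks.NewWorkspaceClient(&databricks.Config{Host: url, Token: "dummy"})
    if err != nil {
        t.Fatal(err)
    }

    created, err := w.Clusters.Create(ctx, compute.CreateCluster{
        ClusterName:  "fake",
        SparkVersion: "13.3.x-scala2.12",
        NumWorkers:   1,
    })
    if err != nil {
        t.Fatal(err)
    }

    // Resize, then read back to confirm the fake workspace stored the new size.
    if _, err := w.Clusters.Resize(ctx, compute.ResizeCluster{ClusterId: created.ClusterId, NumWorkers: 3}); err != nil {
        t.Fatal(err)
    }

    got, err := w.Clusters.GetByClusterId(ctx, created.ClusterId)
    if err != nil {
        t.Fatal(err)
    }
    if got.NumWorkers != 3 {
        t.Fatalf("expected 3 workers, got %d", got.NumWorkers)
    }
}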