Skip to content

Commit 8b3c2c9

Browse files
authored
Added runtime_engine to databricks_cluster (#1686)
1 parent 3fef9d9 commit 8b3c2c9

File tree

5 files changed

+102
-11
lines changed

5 files changed

+102
-11
lines changed

clusters/clusters_api.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -397,6 +397,7 @@ type Cluster struct {
397397
SingleUserName string `json:"single_user_name,omitempty"`
398398
IdempotencyToken string `json:"idempotency_token,omitempty" tf:"force_new"`
399399
WorkloadType *WorkloadType `json:"workload_type,omitempty"`
400+
RuntimeEngine string `json:"runtime_engine,omitempty"`
400401
}
401402

402403
func (cluster Cluster) Validate() error {
@@ -489,6 +490,7 @@ type ClusterInfo struct {
489490
ClusterLogStatus *LogSyncStatus `json:"cluster_log_status,omitempty"`
490491
TerminationReason *TerminationReason `json:"termination_reason,omitempty"`
491492
DataSecurityMode string `json:"data_security_mode,omitempty"`
493+
RuntimeEngine string `json:"runtime_engine,omitempty"`
492494
}
493495

494496
// IsRunningOrResizing returns true if cluster is running or resizing

clusters/resource_cluster.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -83,6 +83,8 @@ func resourceClusterSchema() map[string]*schema.Schema {
8383
s["driver_node_type_id"].ConflictsWith = []string{"driver_instance_pool_id", "instance_pool_id"}
8484
s["node_type_id"].ConflictsWith = []string{"driver_instance_pool_id", "instance_pool_id"}
8585

86+
s["runtime_engine"].ValidateFunc = validation.StringInSlice([]string{"PHOTON", "STANDARD"}, false)
87+
8688
s["is_pinned"] = &schema.Schema{
8789
Type: schema.TypeBool,
8890
Optional: true,

clusters/resource_cluster_test.go

Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -359,6 +359,78 @@ func TestResourceClusterCreate_WithLibraries(t *testing.T) {
359359
assert.Equal(t, "abc", d.Id())
360360
}
361361

362+
func TestResourceClusterCreatePhoton(t *testing.T) {
363+
d, err := qa.ResourceFixture{
364+
Fixtures: []qa.HTTPFixture{
365+
{
366+
Method: "POST",
367+
Resource: "/api/2.0/clusters/create",
368+
ExpectedRequest: Cluster{
369+
NumWorkers: 100,
370+
ClusterName: "Shared Autoscaling",
371+
SparkVersion: "7.1-scala12",
372+
NodeTypeID: "i3.xlarge",
373+
AutoterminationMinutes: 15,
374+
RuntimeEngine: "PHOTON",
375+
},
376+
Response: ClusterInfo{
377+
ClusterID: "abc",
378+
State: ClusterStateRunning,
379+
},
380+
},
381+
{
382+
Method: "GET",
383+
ReuseRequest: true,
384+
Resource: "/api/2.0/clusters/get?cluster_id=abc",
385+
Response: ClusterInfo{
386+
ClusterID: "abc",
387+
NumWorkers: 100,
388+
ClusterName: "Shared Autoscaling",
389+
SparkVersion: "7.1-scala12",
390+
NodeTypeID: "i3.xlarge",
391+
AutoterminationMinutes: 15,
392+
State: ClusterStateRunning,
393+
RuntimeEngine: "PHOTON",
394+
},
395+
},
396+
{
397+
Method: "POST",
398+
Resource: "/api/2.0/clusters/events",
399+
ExpectedRequest: EventsRequest{
400+
ClusterID: "abc",
401+
Limit: 1,
402+
Order: SortDescending,
403+
EventTypes: []ClusterEventType{EvTypePinned, EvTypeUnpinned},
404+
},
405+
Response: EventsResponse{
406+
Events: []ClusterEvent{},
407+
TotalCount: 0,
408+
},
409+
},
410+
{
411+
Method: "GET",
412+
Resource: "/api/2.0/libraries/cluster-status?cluster_id=abc",
413+
Response: libraries.ClusterLibraryStatuses{
414+
LibraryStatuses: []libraries.LibraryStatus{},
415+
},
416+
},
417+
},
418+
Create: true,
419+
Resource: ResourceCluster(),
420+
State: map[string]any{
421+
"autotermination_minutes": 15,
422+
"cluster_name": "Shared Autoscaling",
423+
"spark_version": "7.1-scala12",
424+
"node_type_id": "i3.xlarge",
425+
"num_workers": 100,
426+
"is_pinned": false,
427+
"runtime_engine": "PHOTON",
428+
},
429+
}.Apply(t)
430+
assert.NoError(t, err, err)
431+
assert.Equal(t, "abc", d.Id())
432+
}
433+
362434
func TestResourceClusterCreate_Error(t *testing.T) {
363435
d, err := qa.ResourceFixture{
364436
Fixtures: []qa.HTTPFixture{

docs/data-sources/cluster.md

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,9 +29,11 @@ data "databricks_cluster" "all" {
2929
## Attribute Reference
3030

3131
This data source exports the following attributes:
32+
3233
* `cluster_info` block, consisting of following fields:
3334
* `cluster_name` - Cluster name, which doesn’t have to be unique.
3435
* `spark_version` - [Runtime version](https://docs.databricks.com/runtime/index.html) of the cluster.
36+
* `runtime_engine` - The type of runtime engine the cluster uses, either `STANDARD` or `PHOTON`.
3537
* `driver_node_type_id` - The node type of the Spark driver.
3638
* `node_type_id` - Any supported [databricks_node_type](../data-sources/node_type.md) id.
3739
* `instance_pool_id` The [pool of idle instances](instance_pool.md) the cluster is attached to.
@@ -58,4 +60,4 @@ The following resources are often used in the same context:
5860
* [databricks_instance_pool](../resources/instance_pool.md) to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce [cluster](../resources/cluster.md) start and auto-scaling times by maintaining a set of idle, ready-to-use instances.
5961
* [databricks_job](../resources/job.md) to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a [databricks_cluster](../resources/cluster.md).
6062
* [databricks_library](../resources/library.md) to install a [library](https://docs.databricks.com/libraries/index.html) on [databricks_cluster](../resources/cluster.md).
61-
* [databricks_pipeline](../resources/pipeline.md) to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html).
63+
* [databricks_pipeline](../resources/pipeline.md) to deploy [Delta Live Tables](https://docs.databricks.com/data-engineering/delta-live-tables/index.html).

0 commit comments

Comments
 (0)