+package exporter
+
+import (
+	"log"
+	"strings"
+
+	sdk_compute "github.com/databricks/databricks-sdk-go/service/compute"
+)
+
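+// listClusters lists interactive clusters in the workspace and emits a
+// databricks_cluster resource for every cluster that passes the name and
+// activity filters below.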
+func listClusters(ic *importContext) error {
+	lastActiveMs := ic.getLastActiveMs()
+	interactiveClusters := []sdk_compute.ClusterSource{sdk_compute.ClusterSourceUi, sdk_compute.ClusterSourceApi}
+
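+	// Ask the API to return only clusters created from the UI or via the API,
+	// excluding job-created clusters.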
+	it := ic.workspaceClient.Clusters.List(ic.Context, sdk_compute.ListClustersRequest{
+		FilterBy: &sdk_compute.ListClustersFilterBy{
+			ClusterSources: interactiveClusters,
+		},
+		PageSize: 100,
+	})
+	i := 0
+	for it.HasNext(ic.Context) {
+		c, err := it.Next(ic.Context)
+		if err != nil {
+			return err
+		}
+		i++
+
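+		// Client-side filters: skip terraform-prefixed clusters, clusters
+		// whose names don't match the requested filter, and clusters that
+		// have been inactive since before the cutoff.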
+		if strings.HasPrefix(c.ClusterName, "terraform-") {
+			log.Printf("[INFO] Skipping terraform-specific cluster %s", c.ClusterName)
+			continue
+		}
+		if !ic.MatchesName(c.ClusterName) {
+			log.Printf("[INFO] Skipping cluster %s because its name doesn't match %s", c.ClusterName, ic.match)
+			continue
+		}
+		if c.LastRestartedTime > 0 && c.LastRestartedTime < lastActiveMs {
+			log.Printf("[INFO] Skipping cluster %s, inactive since before the last-active cutoff", c.ClusterName)
+			continue
+		}
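+		// Queue the cluster itself for import.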
+		ic.Emit(&resource{
+			Resource: "databricks_cluster",
+			ID:       c.ClusterId,
+		})
+		if i%50 == 0 {
+			log.Printf("[INFO] Scanned %d clusters", i)
+		}
+	}
+	return nil
+}
+
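+// importCluster emits the resources referenced by a cluster spec: the AWS
+// instance profile, instance pools, the cluster policy, init scripts, secrets
+// used in the Spark configuration, and the principal the cluster runs as.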
+func (ic *importContext) importCluster(c *sdk_compute.ClusterSpec) {
+	if c == nil {
+		return
+	}
+	if c.AwsAttributes != nil && c.AwsAttributes.InstanceProfileArn != "" {
+		ic.Emit(&resource{
+			Resource: "databricks_instance_profile",
+			ID:       c.AwsAttributes.InstanceProfileArn,
+		})
+	}
+	if c.InstancePoolId != "" {
+		// TODO: set enable_elastic_disk to false and remove aws/gcp/azure_attributes,
+		// as these settings are controlled by the instance pool.
+		ic.Emit(&resource{
+			Resource: "databricks_instance_pool",
+			ID:       c.InstancePoolId,
+		})
+	}
+	if c.DriverInstancePoolId != "" {
+		ic.Emit(&resource{
+			Resource: "databricks_instance_pool",
+			ID:       c.DriverInstancePoolId,
+		})
+	}
+	if c.PolicyId != "" {
+		ic.Emit(&resource{
+			Resource: "databricks_cluster_policy",
+			ID:       c.PolicyId,
+		})
+	}
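+	// Emit init scripts plus any secrets referenced from the Spark
+	// configuration and environment variables.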
+	ic.emitInitScripts(c.InitScripts)
+	ic.emitSecretsFromSecretsPathMap(c.SparkConf)
+	ic.emitSecretsFromSecretsPathMap(c.SparkEnvVars)
+	ic.emitUserOrServicePrincipal(c.SingleUserName)
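+	// On clusters that set the new `kind` field, single_user_name may name a
+	// group rather than a user or service principal, so also emit a group
+	// lookup by display name.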
+	if c.Kind.String() != "" && c.SingleUserName != "" {
+		ic.Emit(&resource{
+			Resource:  "databricks_group",
+			Attribute: "display_name",
+			Value:     c.SingleUserName,
+		})
+	}
+}