terraform {
  required_providers {
    databricks = {
      source = "databrickslabs/databricks"
    }
  }
}

provider "aws" {
  region = local.region
}

// initialize provider in "MWS" mode to provision a new workspace
provider "databricks" {
  alias = "mws"
  host  = "https://accounts.cloud.databricks.com"
}

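// trust policy generated by Databricks that lets the Databricks control plane
// assume the cross-account role, with the account ID as the external ID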
data "databricks_aws_assume_role_policy" "this" {
  provider    = databricks.mws
  external_id = local.account_id
}

resource "aws_iam_role" "cross_account_role" {
  name               = "${local.prefix}-cx-terraform-it"
  assume_role_policy = data.databricks_aws_assume_role_policy.this.json
  tags               = local.tags
}

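// policy document for the cross-account role; pass_roles adds iam:PassRole
// for the data role so clusters can launch EC2 instances with it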
data "databricks_aws_crossaccount_policy" "this" {
  provider   = databricks.mws
  pass_roles = [aws_iam_role.data_role.arn]
}

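// attach the generated policy to the cross-account role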
resource "aws_iam_role_policy" "this" {
  name   = "${local.prefix}-cx-terraform-it"
  role   = aws_iam_role.cross_account_role.id
  policy = data.databricks_aws_crossaccount_policy.this.json
}

// register cross-account role ARN with the Databricks account
resource "databricks_mws_credentials" "this" {
  provider         = databricks.mws
  account_id       = local.account_id
  role_arn         = aws_iam_role.cross_account_role.arn
  credentials_name = "${local.prefix}-creds"

  // not strictly required, but ensures the policy is attached
  // before the credentials are registered
  depends_on = [aws_iam_role_policy.this]
}

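// root bucket that will back DBFS for the workspace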
resource "aws_s3_bucket" "root_storage_bucket" {
  bucket = "${local.prefix}-cx-terraform-it"
  acl    = "private"
  versioning {
    enabled = false
  }
  force_destroy = true
  tags = merge(local.tags, {
    Name = "${local.prefix}-cx-terraform-it"
  })
}

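// block public access to the root bucket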
resource "aws_s3_bucket_public_access_block" "root_storage_bucket" {
  bucket             = aws_s3_bucket.root_storage_bucket.id
  ignore_public_acls = true
  depends_on         = [aws_s3_bucket.root_storage_bucket]
}

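// bucket policy that grants Databricks access to the root bucket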
data "databricks_aws_bucket_policy" "this" {
  bucket = aws_s3_bucket.root_storage_bucket.bucket
}

resource "aws_s3_bucket_policy" "root_bucket_policy" {
  bucket = aws_s3_bucket.root_storage_bucket.id
  policy = data.databricks_aws_bucket_policy.this.json
}

// register root bucket
resource "databricks_mws_storage_configurations" "this" {
  provider                   = databricks.mws
  account_id                 = local.account_id
  bucket_name                = aws_s3_bucket.root_storage_bucket.bucket
  storage_configuration_name = "${local.prefix}-cx-terraform-it"
}

// register VPC
data "aws_availability_zones" "available" {}

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "3.2.0"

  name = local.prefix
  cidr = local.cidr_block
  azs  = data.aws_availability_zones.available.names
  tags = local.tags

  enable_dns_hostnames = true
  enable_nat_gateway   = true
  single_nat_gateway   = true
  create_igw           = true

  public_subnets = [cidrsubnet(local.cidr_block, 3, 0)]
  private_subnets = [cidrsubnet(local.cidr_block, 3, 1),
    cidrsubnet(local.cidr_block, 3, 2)]

  manage_default_security_group = true
  default_security_group_name   = "${local.prefix}-sg"

  default_security_group_egress = [{
    cidr_blocks = "0.0.0.0/0"
  }]

  default_security_group_ingress = [{
    description = "Allow all internal TCP and UDP"
    self        = true
  }]
}

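// register the VPC, private subnets and security group with the Databricks account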
resource "databricks_mws_networks" "this" {
  provider           = databricks.mws
  account_id         = local.account_id
  network_name       = "${local.prefix}-network"
  security_group_ids = [module.vpc.default_security_group_id]
  subnet_ids         = module.vpc.private_subnets
  vpc_id             = module.vpc.vpc_id
}

// create workspace in given VPC with DBFS on root bucket
resource "databricks_mws_workspaces" "this" {
  provider        = databricks.mws
  account_id      = local.account_id
  aws_region      = local.region
  workspace_name  = local.prefix
  deployment_name = local.prefix

  credentials_id           = databricks_mws_credentials.this.credentials_id
  storage_configuration_id = databricks_mws_storage_configurations.this.storage_configuration_id
  network_id               = databricks_mws_networks.this.network_id
}

// initialize provider in normal (workspace) mode
provider "databricks" {
  // in a normal scenario you won't have to give providers aliases
  alias = "created_workspace"

  host = databricks_mws_workspaces.this.workspace_url
}

// create PAT token to provision entities within workspace
resource "databricks_token" "pat" {
  provider = databricks.created_workspace
  comment  = "Terraform Provisioning"
  // 7 day token
  lifetime_seconds = 60 * 60 * 24 * 7
}

// create bucket for mounting
resource "aws_s3_bucket" "ds" {
  bucket = "${local.prefix}-ds"
  acl    = "private"
  versioning {
    enabled = false
  }
  force_destroy = true
  tags = merge(local.tags, {
    Name = "${local.prefix}-ds"
  })
}

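// trust policy that lets EC2 instances assume the data role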
data "aws_iam_policy_document" "assume_role_for_ec2" {
  statement {
    effect  = "Allow"
    actions = ["sts:AssumeRole"]
    principals {
      identifiers = ["ec2.amazonaws.com"]
      type        = "Service"
    }
  }
}

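// role that cluster nodes will use (via instance profile) to access the data bucket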
resource "aws_iam_role" "data_role" {
  name               = "${local.prefix}-first-ec2s3"
  description        = "(${local.prefix}) EC2 Assume Role role for S3 access"
  assume_role_policy = data.aws_iam_policy_document.assume_role_for_ec2.json
  tags               = local.tags
}

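// instance profile wrapping the data role, to be attached to clusters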
resource "aws_iam_instance_profile" "this" {
  name = "${local.prefix}-first-profile"
  role = aws_iam_role.data_role.name
}

data "databricks_aws_bucket_policy" "ds" {
  provider         = databricks.mws
  full_access_role = aws_iam_role.data_role.arn
  bucket           = aws_s3_bucket.ds.bucket
}

// allow databricks to access this bucket
resource "aws_s3_bucket_policy" "ds" {
  bucket = aws_s3_bucket.ds.id
  policy = data.databricks_aws_bucket_policy.ds.json
}

// block all public access to created bucket
resource "aws_s3_bucket_public_access_block" "this" {
  bucket             = aws_s3_bucket.ds.id
  ignore_public_acls = true
}

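// upload a dummy object so the bucket is not empty; referencing the
// public access block in the key creates an implicit dependency on it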
resource "aws_s3_bucket_object" "this" {
  key    = "/dummy-${aws_s3_bucket_public_access_block.this.bucket}/main.tf"
  bucket = aws_s3_bucket.ds.id
  source = "${path.module}/main.tf"
  tags   = local.tags
}

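// run the acceptance test suite in an Azure Container Instance,
// pointed at the freshly created AWS workspace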
resource "azurerm_container_group" "aws" {
  name                = "${local.prefix}-aws-run"
  location            = azurerm_resource_group.this.location
  resource_group_name = azurerm_resource_group.this.name
  tags                = azurerm_resource_group.this.tags

  os_type            = "Linux"
  restart_policy     = "Never"
  ip_address_type    = "Private"
  network_profile_id = azurerm_network_profile.this.id

  identity {
    type         = "UserAssigned"
    identity_ids = [azurerm_user_assigned_identity.this.id]
  }

  container {
    name   = "acceptance"
    image  = "ghcr.io/databrickslabs/terraform-provider-it:master"
    cpu    = "2"
    memory = "2"
    environment_variables = {
      CLOUD_ENV                 = "AWS"
      TEST_FILTER               = "TestAcc"
      DATABRICKS_HOST           = databricks_mws_workspaces.this.workspace_url
      TEST_S3_BUCKET            = aws_s3_bucket.ds.bucket
      TEST_EC2_INSTANCE_PROFILE = aws_iam_instance_profile.this.arn
    }

    secure_environment_variables = {
      DATABRICKS_TOKEN = databricks_token.pat.token_value
    }

    ports {
      port     = 443
      protocol = "TCP"
    }
  }
}