|
1 | 1 | import logging |
2 | 2 | import os |
| 3 | +import random |
3 | 4 | import shutil |
4 | 5 | import subprocess |
5 | 6 | import sys |
|
8 | 9 |
|
9 | 10 | import pytest |
10 | 11 | from databricks.sdk.service import compute, jobs |
| 12 | +from databricks.sdk.service.iam import PermissionLevel |
11 | 13 | from databricks.sdk.service.workspace import ImportFormat |
12 | 14 |
|
| 15 | +from databricks.labs.ucx.inventory.types import RequestObjectType |
13 | 16 | from databricks.labs.ucx.providers.mixins.compute import CommandExecutor |
| 17 | +from databricks.labs.ucx.tacl.grants import Grant |
| 18 | +from databricks.labs.ucx.tacl.tables import Table |
14 | 19 |
|
15 | 20 | logger = logging.getLogger(__name__) |
16 | 21 |
|
@@ -96,63 +101,55 @@ def test_sql_backend_works(ws, wsfs_wheel): |
96 | 101 | def test_toolkit_notebook( |
97 | 102 | ws, |
98 | 103 | sql_exec, |
| 104 | + sql_fetch_all, |
99 | 105 | wsfs_wheel, |
100 | | - make_cluster, |
101 | 106 | make_cluster_policy, |
102 | | - make_directory, |
| 107 | + make_cluster_policy_permissions, |
103 | 108 | make_ucx_group, |
104 | | - make_instance_pool, |
105 | 109 | make_job, |
106 | | - make_notebook, |
107 | | - make_pipeline, |
| 110 | + make_job_permissions, |
108 | 111 | make_random, |
109 | | - make_repo, |
110 | | - make_secret_scope, |
111 | 112 | make_schema, |
112 | 113 | make_table, |
113 | | - make_user, |
114 | 114 | ): |
115 | 115 | logger.info("setting up fixtures") |
116 | 116 |
|
117 | | - user_a = make_user() |
118 | | - user_b = make_user() |
119 | | - user_c = make_user() |
120 | | - |
121 | | - logger.info(f"user_a={user_a}, user_b={user_b}, user_c={user_c}, ") |
122 | | - |
123 | | - # TODO add users to groups |
124 | 117 | ws_group_a, acc_group_a = make_ucx_group() |
| 118 | + members_src_a = sorted([_.display for _ in ws.groups.get(id=ws_group_a.id).members]) |
125 | 119 | ws_group_b, acc_group_b = make_ucx_group() |
| 120 | + members_src_b = sorted([_.display for _ in ws.groups.get(id=ws_group_b.id).members]) |
126 | 121 | ws_group_c, acc_group_c = make_ucx_group() |
| 122 | + members_src_c = sorted([_.display for _ in ws.groups.get(id=ws_group_c.id).members]) |
127 | 123 |
|
128 | 124 | selected_groups = ",".join([ws_group_a.display_name, ws_group_b.display_name, ws_group_c.display_name]) |
129 | 125 |
|
130 | 126 | logger.info(f"group_a={ws_group_a}, group_b={ws_group_b}, group_c={ws_group_c}, ") |
131 | 127 |
|
132 | | - cluster = make_cluster(instance_pool_id=os.environ["TEST_INSTANCE_POOL_ID"], single_node=True) |
133 | 128 | cluster_policy = make_cluster_policy() |
134 | | - directory = make_directory() |
135 | | - instance_pool = make_instance_pool() |
| 129 | + make_cluster_policy_permissions( |
| 130 | + object_id=cluster_policy.policy_id, |
| 131 | + permission_level=random.choice([PermissionLevel.CAN_USE]), |
| 132 | + group_name=ws_group_a.display_name, |
| 133 | + ) |
| 134 | + cpp_src = ws.permissions.get(RequestObjectType.CLUSTER_POLICIES, cluster_policy.policy_id) |
| 135 | + cluster_policy_src_permissions = sorted( |
| 136 | + [_ for _ in cpp_src.access_control_list if _.group_name == ws_group_a.display_name], |
| 137 | + key=lambda p: p.group_name, |
| 138 | + ) |
136 | 139 | job = make_job() |
137 | | - notebook = make_notebook() |
138 | | - pipeline = make_pipeline() |
139 | | - repo = make_repo() |
140 | | - secret_scope = make_secret_scope() |
141 | | - |
142 | | - logger.info( |
143 | | - f"cluster={cluster}, " |
144 | | - f"cluster_policy={cluster_policy}, " |
145 | | - f"directory={directory}, " |
146 | | - f"instance_pool={instance_pool}, " |
147 | | - f"job={job}, " |
148 | | - f"notebook={notebook}, " |
149 | | - f"pipeline={pipeline}" |
150 | | - f"repo={repo}, " |
151 | | - f"secret_scope={secret_scope}, " |
| 140 | + make_job_permissions( |
| 141 | + object_id=job.job_id, |
| 142 | + permission_level=random.choice( |
| 143 | + [PermissionLevel.CAN_VIEW, PermissionLevel.CAN_MANAGE_RUN, PermissionLevel.CAN_MANAGE] |
| 144 | + ), |
| 145 | + group_name=ws_group_b.display_name, |
152 | 146 | ) |
153 | | - |
154 | | - # TODO create fixtures for DBSQL assets |
155 | | - # TODO set permissions |
| 147 | + jp_src = ws.permissions.get(RequestObjectType.JOBS, job.job_id) |
| 148 | + job_src_permissions = sorted( |
| 149 | + [_ for _ in jp_src.access_control_list if _.group_name == ws_group_b.display_name], |
| 150 | + key=lambda p: p.group_name, |
| 151 | + ) |
| 152 | + logger.info(f"cluster_policy={cluster_policy}, job={job}, ") |
156 | 153 |
|
157 | 154 | schema_a = make_schema() |
158 | 155 | schema_b = make_schema() |
@@ -222,7 +219,91 @@ def test_toolkit_notebook( |
222 | 219 |
|
223 | 220 | try: |
224 | 221 | ws.jobs.run_now(created_job.job_id).result() |
225 | | - # TODO Validate migration, tacl |
| 222 | + |
| 223 | + logger.info("validating group ids") |
| 224 | + |
| 225 | + dst_ws_group_a = ws.groups.list(filter=f"displayName eq {ws_group_a.display_name}")[0] |
| 226 | + assert ( |
| 227 | + ws_group_a.id != dst_ws_group_a.id |
| 228 | + ), f"Group id for target group {ws_group_a.display_name} should differ from group id of source group" |
| 229 | + |
| 230 | + dst_ws_group_b = ws.groups.list(filter=f"displayName eq {ws_group_b.display_name}")[0] |
| 231 | + assert ( |
| 232 | + ws_group_b.id != dst_ws_group_b.id |
| 233 | + ), f"Group id for target group {ws_group_b.display_name} should differ from group id of source group" |
| 234 | + |
| 235 | + dst_ws_group_c = ws.groups.list(filter=f"displayName eq {ws_group_c.display_name}")[0] |
| 236 | + assert ( |
| 237 | + ws_group_c.id != dst_ws_group_c.id |
| 238 | + ), f"Group id for target group {ws_group_c.display_name} should differ from group id of source group" |
| 239 | + |
| 240 | + logger.info("validating group members") |
| 241 | + |
| 242 | + members_dst_a = sorted([_.display for _ in ws.groups.get(id=dst_ws_group_a.id).members]) |
| 243 | +        assert members_dst_a == members_src_a, f"Members in {ws_group_a.display_name} were not migrated correctly" |
| 244 | + |
| 245 | + members_dst_b = sorted([_.display for _ in ws.groups.get(id=dst_ws_group_b.id).members]) |
| 246 | + assert members_dst_b == members_src_b, f"Members in {ws_group_b.display_name} were not migrated correctly" |
| 247 | + |
| 248 | + members_dst_c = sorted([_.display for _ in ws.groups.get(id=dst_ws_group_c.id).members]) |
| 249 | + assert members_dst_c == members_src_c, f"Members in {ws_group_c.display_name} were not migrated correctly" |
| 250 | + |
| 251 | + logger.info("validating permissions") |
| 252 | + |
| 253 | + cp_dst = ws.permissions.get(RequestObjectType.CLUSTER_POLICIES, cluster_policy.policy_id) |
| 254 | + cluster_policy_dst_permissions = sorted( |
| 255 | + [_ for _ in cp_dst.access_control_list if _.group_name == ws_group_a.display_name], |
| 256 | + key=lambda p: p.group_name, |
| 257 | + ) |
| 258 | + assert len(cluster_policy_dst_permissions) == len( |
| 259 | + cluster_policy_src_permissions |
| 260 | + ), "Target permissions were not applied correctly for cluster policies" |
| 261 | + assert [t.all_permissions for t in cluster_policy_dst_permissions] == [ |
| 262 | + s.all_permissions for s in cluster_policy_src_permissions |
| 263 | + ], "Target permissions were not applied correctly for cluster policies" |
| 264 | + |
| 265 | + jp_dst = ws.permissions.get(RequestObjectType.JOBS, job.job_id) |
| 266 | + job_dst_permissions = sorted( |
| 267 | + [_ for _ in jp_dst.access_control_list if _.group_name == ws_group_b.display_name], |
| 268 | + key=lambda p: p.group_name, |
| 269 | + ) |
| 270 | + assert len(job_dst_permissions) == len( |
| 271 | + job_src_permissions |
| 272 | + ), f"Target permissions were not applied correctly for {RequestObjectType.JOBS}/{job.job_id}" |
| 273 | + assert [t.all_permissions for t in job_dst_permissions] == [ |
| 274 | + s.all_permissions for s in job_src_permissions |
| 275 | + ], f"Target permissions were not applied correctly for {RequestObjectType.JOBS}/{job.job_id}" |
| 276 | + |
| 277 | + logger.info("validating tacl") |
| 278 | + |
| 279 | + tables = sql_fetch_all(f"SELECT * FROM hive_metastore.{inventory_database}.tables") |
| 280 | + print(list(sql_fetch_all(f"SELECT * FROM hive_metastore.{inventory_database}.tables"))) |
| 281 | + |
| 282 | + all_tables = {} |
| 283 | + for t_row in tables: |
| 284 | + table = Table(*t_row) |
| 285 | + all_tables[table.key] = table |
| 286 | + |
| 287 | + assert len(all_tables) >= 2, "must have at least two tables" |
| 288 | + |
| 289 | + logger.debug(f"all tables={all_tables}, ") |
| 290 | + |
| 291 | + grants = sql_fetch_all(f"SELECT * FROM hive_metastore.{inventory_database}.grants") |
| 292 | + all_grants = {} |
| 293 | + for g_row in grants: |
| 294 | + grant = Grant(*g_row) |
| 295 | + if grant.table: |
| 296 | + all_grants[f"{grant.principal}.{grant.catalog}.{grant.database}.{grant.table}"] = grant.action_type |
| 297 | + else: |
| 298 | + all_grants[f"{grant.principal}.{grant.catalog}.{grant.database}"] = grant.action_type |
| 299 | + |
| 300 | + logger.debug(f"all grants={all_grants}, ") |
| 301 | + |
| 302 | + assert len(all_grants) >= 3, "must have at least three grants" |
| 303 | + assert all_grants[f"{ws_group_a.display_name}.{table_a}"] == "SELECT" |
| 304 | + assert all_grants[f"{ws_group_b.display_name}.{table_b}"] == "SELECT" |
| 305 | + assert all_grants[f"{ws_group_b.display_name}.{schema_b}"] == "MODIFY" |
| 306 | + |
226 | 307 | finally: |
227 | 308 | logger.info("deleting workbook") |
228 | 309 |
|
|
0 commit comments