@@ -100,6 +100,70 @@ def test_job_assessment():
     assert result_set[1].success == 0


+def test_job_assessment_no_job_tasks():
+    sample_jobs = [
+        BaseJob(
+            created_time=1694536604319,
+            creator_user_name="[email protected]",
+            job_id=536591785949415,
+            settings=JobSettings(
+                compute=None,
+                continuous=None,
+                tasks=None,
+                timeout_seconds=0,
+            ),
+        ),
+    ]
+
+    sample_clusters = [
+        ClusterDetails(
+            autoscale=AutoScale(min_workers=1, max_workers=6),
+            spark_conf={"spark.databricks.delta.preview.enabled": "true"},
+            spark_context_id=5134472582179566666,
+            spark_env_vars=None,
+            spark_version="13.3.x-cpu-ml-scala2.12",
+            cluster_id="0810-229933-chicago99",
+            cluster_source=ClusterSource.JOB,
+        ),
+    ]
+    ws = Mock()
+    result_set = JobsCrawler(ws, MockBackend(), "ucx")._assess_jobs(
+        sample_jobs, {c.cluster_id: c for c in sample_clusters}
+    )
+    assert len(result_set) == 1
+    assert result_set[0].success == 1
+
+
+def test_job_assessment_no_job_settings():
+    sample_jobs = [
+        BaseJob(
+            created_time=1694536604319,
+            creator_user_name="[email protected]",
+            job_id=536591785949415,
+            settings=None,
+        ),
+    ]
+
+    sample_clusters = [
+        ClusterDetails(
+            autoscale=AutoScale(min_workers=1, max_workers=6),
+            spark_conf={"spark.databricks.delta.preview.enabled": "true"},
+            spark_context_id=5134472582179566666,
+            spark_env_vars=None,
+            spark_version="13.3.x-cpu-ml-scala2.12",
+            cluster_id="0810-229933-chicago99",
+            cluster_source=ClusterSource.JOB,
+        ),
+    ]
+    ws = Mock()
+    result_set = JobsCrawler(ws, MockBackend(), "ucx")._assess_jobs(
+        sample_jobs, {c.cluster_id: c for c in sample_clusters}
+    )
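+    # A job with no settings at all is skipped by the crawler, so no rows are produced.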
+    assert len(result_set) == 0
+
+
 def test_job_assessment_for_azure_spark_config():
     sample_jobs = [
         BaseJob(
@@ -243,6 +307,49 @@ def test_job_assessment_for_azure_spark_config():
     assert result_set[2].success == 0


+def test_jobs_assessment_with_spn_cluster_no_job_tasks(mocker):
+    sample_jobs = [
+        BaseJob(
+            created_time=1694536604319,
+            creator_user_name="[email protected]",
+            job_id=536591785949415,
+            settings=JobSettings(
+                compute=None,
+                continuous=None,
+                tasks=None,
+                timeout_seconds=0,
+            ),
+        )
+    ]
+
+    ws = mocker.Mock()
+    ws.clusters.list.return_value = []
+    ws.jobs.list.return_value = sample_jobs
+
+    jobs = AzureServicePrincipalCrawler(ws, MockBackend(), "ucx")._list_all_jobs_with_spn_in_spark_conf()
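+    # A job whose settings contain no tasks exposes no clusters or spark conf, so no SPN usage is found.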
+    assert len(jobs) == 0
+
+
+def test_jobs_assessment_with_spn_cluster_no_job_settings(mocker):
+    sample_jobs = [
+        BaseJob(
+            created_time=1694536604319,
+            creator_user_name="[email protected]",
+            job_id=536591785949415,
+            settings=None,
+        )
+    ]
+
+    ws = mocker.Mock()
+    ws.clusters.list.return_value = []
+    ws.jobs.list.return_value = sample_jobs
+
+    jobs = AzureServicePrincipalCrawler(ws, MockBackend(), "ucx")._list_all_jobs_with_spn_in_spark_conf()
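+    # With settings missing entirely there is nothing to inspect, so the crawler returns no jobs.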
+    assert len(jobs) == 0
+
+
 def test_jobs_assessment_with_spn_cluster_policy_not_found(mocker):
     sample_jobs = [
         BaseJob(