|
31 | 31 | EventKey,
|
32 | 32 | TaskJobLogsRetrieveContext,
|
33 | 33 | )
|
| 34 | +from cylc.flow.task_state import ( |
| 35 | + TASK_STATUS_PREPARING, |
| 36 | + TASK_STATUS_SUBMIT_FAILED, |
| 37 | +) |
34 | 38 |
|
35 | 39 | from .test_workflow_events import TEMPLATES
|
36 | 40 |
|
@@ -126,7 +130,7 @@ async def test__insert_task_job(flow, one_conf, scheduler, start, validate):
|
126 | 130 |
|
127 | 131 |
|
128 | 132 | async def test__always_insert_task_job(
|
129 |
| - flow, scheduler, mock_glbl_cfg, start, run |
| 133 | + flow, scheduler, mock_glbl_cfg, start |
130 | 134 | ):
|
131 | 135 | """Insert Task Job _Always_ inserts a task into the data store.
|
132 | 136 |
|
@@ -189,6 +193,46 @@ async def test__always_insert_task_job(
|
189 | 193 | assert job.submitted_time
|
190 | 194 |
|
191 | 195 |
|
async def test__submit_failed_job_id(flow, scheduler, start, db_select):
    """A job killed in the submitted state keeps its job ID.

    The job ID must remain visible in both the DB and the data store
    after the submit-failed transition, and after a restart.

    See https://github.com/cylc/cylc-flow/pull/6926
    """
    expected_job_id = '1234'

    async def ds_job_id(schd: Scheduler):
        # Flush pending deltas, then read the sole job's ID out of the store.
        await schd.update_data_structure()
        jobs = schd.data_store_mgr.data[schd.id][JOBS]
        return list(jobs.values())[0].job_id

    wid = flow('foo')
    schd: Scheduler = scheduler(wid)
    async with start(schd):
        # Fake a job submission for the only task in the pool.
        itask = schd.pool.get_tasks()[0]
        itask.state_reset(TASK_STATUS_PREPARING)
        itask.submit_num = 1
        itask.summary['submit_method_id'] = expected_job_id
        schd.workflow_db_mgr.put_insert_task_jobs(itask, {})
        schd.task_events_mgr.process_message(
            itask, 'INFO', schd.task_events_mgr.EVENT_SUBMITTED
        )
        assert await ds_job_id(schd) == expected_job_id

        # Now fail the submission (e.g. job killed whilst submitted).
        schd.task_events_mgr.process_message(
            itask, 'CRITICAL', schd.task_events_mgr.EVENT_SUBMIT_FAILED
        )
        assert itask.state(TASK_STATUS_SUBMIT_FAILED)
        assert await ds_job_id(schd) == expected_job_id

        # The DB row should retain the job ID alongside the failed status.
        assert db_select(
            schd, False, 'task_jobs', 'job_id', 'submit_status'
        ) == [
            (expected_job_id, 1)
        ]

    # Restart and check data store again:
    schd = scheduler(wid)
    async with start(schd):
        assert await ds_job_id(schd) == expected_job_id
| 235 | + |
192 | 236 | async def test__process_message_failed_with_retry(one, start, log_filter):
|
193 | 237 | """Log job failure, even if a retry is scheduled.
|
194 | 238 |
|
|
0 commit comments