Commit ab3a5bd
move status table updates for various tables together in scale out
TODO: this reveals a possible bug: FAILED entries in simulated status are not immediately sent to monitoring, but instead only get sent at the next poller update, unlike submitted entries, which are sent immediately. That should be an easy fix after this PR, though...
1 parent 978f433 commit ab3a5bd

File tree

1 file changed: +9 -5 lines changed

parsl/executors/status_handling.py

Lines changed: 9 additions & 5 deletions
@@ -184,23 +184,27 @@ def scale_out_facade(self, n: int) -> List[str]:
         if not self.provider:
             raise ScalingFailed(self, "No execution provider available")
         block_ids = []
+        monitoring_status_changes = {}
         logger.info(f"Scaling out by {n} blocks")
         for _ in range(n):
             block_id = str(self._block_id_counter.get_id())
             logger.info(f"Allocated block ID {block_id}")
             try:
                 job_id = self._launch_block(block_id)
+
+                pending_status = JobStatus(JobState.PENDING)
+
                 self.blocks_to_job_id[block_id] = job_id
                 self.job_ids_to_block[job_id] = block_id
+                self._status[block_id] = pending_status
+
+                monitoring_status_changes[block_id] = pending_status
                 block_ids.append(block_id)
+
             except Exception as ex:
                 self._simulated_status[block_id] = JobStatus(JobState.FAILED, "Failed to start block {}: {}".format(block_id, ex))
 
-        new_status = {}
-        for block_id in block_ids:
-            new_status[block_id] = JobStatus(JobState.PENDING)
-        self.send_monitoring_info(new_status)
-        self._status.update(new_status)
+        self.send_monitoring_info(monitoring_status_changes)
         return block_ids
 
     def scale_in(self, blocks: int) -> List[str]:
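A minimal sketch of the follow-up fix hinted at in the commit message TODO, assuming the simplest approach is to push the simulated FAILED status into the same monitoring batch (monitoring_status_changes) introduced above, so it is sent immediately rather than at the next poller update. This is not part of this commit, and the local name failed_status is hypothetical:

             except Exception as ex:
-                self._simulated_status[block_id] = JobStatus(JobState.FAILED, "Failed to start block {}: {}".format(block_id, ex))
+                # hypothetical sketch: record the simulated status as before, but also
+                # queue it so it goes out in the same monitoring batch as the PENDING
+                # entries instead of waiting for the next poller update
+                failed_status = JobStatus(JobState.FAILED, "Failed to start block {}: {}".format(block_id, ex))
+                self._simulated_status[block_id] = failed_status
+                monitoring_status_changes[block_id] = failed_status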
