@@ -963,10 +963,10 @@ def submit(self, sccs: list[SCC]) -> None:
         """Submit a stale SCC for processing in current process."""
         self.scc_queue.extend(sccs)
 
-    def get_done(self, graph: Graph) -> tuple[list[SCC], bool]:
+    def wait_for_done(self, graph: Graph) -> tuple[list[SCC], bool]:
         """Wait for a stale SCC processing (in process) to finish.
 
-        Return nest processed SCC amd whether we have more in the queue.
+        Return next processed SCC and whether we have more in the queue.
         This emulates the API we will have for parallel processing
         in multiple worker processes.
         """
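The renamed wait_for_done pairs with submit: stale SCCs are queued, then handed back one batch at a time together with a flag saying whether more work is pending, emulating the future multi-process worker-pool API while everything still runs in one process. A minimal sketch of that contract, using an illustrative DemoManager with string SCC ids and no graph argument (these names are stand-ins, not the real build classes):

class DemoManager:
    """Toy stand-in for the build manager's in-process queue (illustration only)."""

    def __init__(self) -> None:
        self.scc_queue: list[str] = []

    def submit(self, sccs: list[str]) -> None:
        # Queue stale SCCs for processing in the current process.
        self.scc_queue.extend(sccs)

    def wait_for_done(self) -> tuple[list[str], bool]:
        # "Wait" by synchronously taking the next queued SCC, then report
        # whether anything is still pending, the same shape of result a real
        # worker pool would hand back.
        scc = self.scc_queue.pop(0)
        return [scc], bool(self.scc_queue)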
@@ -3371,20 +3371,20 @@ def process_graph(graph: Graph, manager: BuildManager) -> None:
         else:
             not_ready.append(scc)
 
-    processing = False
-    while ready or not_ready or processing:
+    still_working = False
+    while ready or not_ready or still_working:
         stale, fresh = find_stale_sccs(ready, graph, manager)
         if stale:
             manager.submit(stale)
-            processing = True
+            still_working = True
         # We eagerly walk over fresh SCCs to reach as many stale SCCs as soon
         # as possible. Only when there are no fresh SCCs, we wait on scheduled stale ones.
         # This strategy, similar to a naive strategy in minesweeper game, will allow us
         # to leverage parallelism as much as possible.
         if fresh:
             done = fresh
         else:
-            done, processing = manager.get_done(graph)
+            done, still_working = manager.wait_for_done(graph)
         ready = []
         for done_scc in done:
             for dependent in done_scc.direct_dependents:
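The renamed still_working flag records that something has been submitted but not yet returned by wait_for_done, which is what keeps the loop alive once the ready list drains. A self-contained toy run of the eager walk described in the comment above, reusing the DemoManager sketch from earlier; dependents and deps_left are illustrative stand-ins for the real Graph bookkeeping, and the not_ready handling is omitted for brevity:

def demo_run(ready: list[str], stale: set[str],
             dependents: dict[str, list[str]], deps_left: dict[str, int]) -> list[str]:
    manager = DemoManager()
    order: list[str] = []
    still_working = False
    while ready or still_working:
        stale_now = [s for s in ready if s in stale]
        fresh = [s for s in ready if s not in stale]
        if stale_now:
            manager.submit(stale_now)
            still_working = True
        if fresh:
            done = fresh  # fresh SCCs need no work, so walk them immediately
        else:
            done, still_working = manager.wait_for_done()
        order.extend(done)
        ready = []
        for done_scc in done:
            for dependent in dependents.get(done_scc, []):
                deps_left[dependent] -= 1
                if deps_left[dependent] == 0:
                    ready.append(dependent)
    return order

# A -> B -> C with only B stale: A and C are walked eagerly, and the loop
# blocks in wait_for_done only once no fresh SCC is left.
print(demo_run(ready=["A"], stale={"B"},
               dependents={"A": ["B"], "B": ["C"]},
               deps_left={"B": 1, "C": 1}))  # ['A', 'B', 'C']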