     "abc",
 }
 
+# We are conservative for now; this can be increased in the future if safe/useful.
+MAX_GC_FREEZE_CYCLES = 1
 
 Graph: _TypeAlias = dict[str, "State"]
 
@@ -707,6 +709,8 @@ def __init__(
         # new file can be processed O(n**2) times. This cache
         # avoids most of this redundant work.
         self.ast_cache: dict[str, tuple[MypyFile, list[ErrorInfo]]] = {}
+        # Number of times we have used the GC optimization hack for fresh SCCs.
+        self.gc_freeze_cycles = 0
 
     def dump_stats(self) -> None:
         if self.options.dump_build_stats:
@@ -3326,7 +3330,10 @@ def process_graph(graph: Graph, manager: BuildManager) -> None:
     #
     # TODO: see if it's possible to determine if we need to process only a
     # _subset_ of the past SCCs instead of having to process them all.
-    if platform.python_implementation() == "CPython":
+    if (
+        platform.python_implementation() == "CPython"
+        and manager.gc_freeze_cycles < MAX_GC_FREEZE_CYCLES
+    ):
         # When deserializing the cache we create a huge number of new objects, so even
         # with our generous GC thresholds, the GC still does a lot of pointless
         # work searching for garbage. So, we temporarily disable it when
@@ -3338,7 +3345,11 @@ def process_graph(graph: Graph, manager: BuildManager) -> None:
         gc.disable()
     for prev_scc in fresh_scc_queue:
         process_fresh_modules(graph, prev_scc, manager)
-    if platform.python_implementation() == "CPython":
+    if (
+        platform.python_implementation() == "CPython"
+        and manager.gc_freeze_cycles < MAX_GC_FREEZE_CYCLES
+    ):
+        manager.gc_freeze_cycles += 1
         gc.freeze()
         gc.unfreeze()
         gc.enable()
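
The gc.freeze()/gc.unfreeze() pair in the hunk above moves every object that survived deserialization straight into the collector's oldest generation, so the young-generation collections that follow never traverse them. A minimal standalone sketch of the same CPython-only pattern, where deserialize_cache_stub is a hypothetical stand-in for mypy's cache loading (not mypy's actual code):

import gc
import platform

def deserialize_cache_stub() -> list[dict[str, int]]:
    # Hypothetical stand-in: allocate many objects that stay alive
    # for the rest of the process, like a deserialized module cache.
    return [{"value": i} for i in range(1_000_000)]

if platform.python_implementation() == "CPython":
    gc.disable()   # don't collect while we allocate in bulk
data = deserialize_cache_stub()
if platform.python_implementation() == "CPython":
    gc.freeze()    # move all tracked objects to the permanent generation...
    gc.unfreeze()  # ...then back into the oldest generation
    gc.enable()    # later young-generation collections skip these objects

gc.freeze() and gc.unfreeze() are available on CPython 3.7+, which is one reason both the diff and this sketch guard the calls with a python_implementation() check; the new gc_freeze_cycles counter additionally limits how many times the trick runs per BuildManager, capped by MAX_GC_FREEZE_CYCLES.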