@@ -734,13 +734,25 @@ move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
734734 unreachable -> _gc_next &= _PyGC_PREV_MASK ;
735735}
736736
737+ /* In theory, all tuples should be younger than the
738+ * objects they refer to, as tuples are immutable.
739+ * Therefore, untracking tuples in oldest-first order in the
740+ * young generation before promoting them should have untracked
741+ * all the tuples that can be untracked.
742+ *
743+ * Unfortunately, the C API allows tuples to be created
744+ * and then filled in. So this won't untrack all tuples
745+ * that can be untracked. It should untrack most of them
746+ * and is much faster than a more complex approach that
747+ * would untrack all relevant tuples.
748+ */
737749static void
738750untrack_tuples (PyGC_Head * head )
739751{
740- PyGC_Head * next , * gc = GC_NEXT (head );
752+ PyGC_Head * gc = GC_NEXT (head );
741753 while (gc != head ) {
742754 PyObject * op = FROM_GC (gc );
743- next = GC_NEXT (gc );
755+ PyGC_Head * next = GC_NEXT (gc );
744756 if (PyTuple_CheckExact (op )) {
745757 _PyTuple_MaybeUntrack (op );
746758 }
@@ -1553,7 +1565,7 @@ assess_work_to_do(GCState *gcstate)
15531565 scale_factor = 2 ;
15541566 }
15551567 intptr_t new_objects = gcstate -> young .count ;
1556- intptr_t max_heap_fraction = new_objects * 3 /2 ;
1568+ intptr_t max_heap_fraction = new_objects * 3 /2 ;
15571569 intptr_t heap_fraction = gcstate -> heap_size / SCAN_RATE_DIVISOR / scale_factor ;
15581570 if (heap_fraction > max_heap_fraction ) {
15591571 heap_fraction = max_heap_fraction ;
@@ -1569,12 +1581,13 @@ gc_collect_increment(PyThreadState *tstate, struct gc_collection_stats *stats)
15691581 GCState * gcstate = & tstate -> interp -> gc ;
15701582 gcstate -> work_to_do += assess_work_to_do (gcstate );
15711583 untrack_tuples (& gcstate -> young .head );
1572- // if (gcstate->phase == GC_PHASE_MARK) {
1573- // Py_ssize_t objects_marked = mark_at_start(tstate);
1574- // GC_STAT_ADD(1, objects_transitively_reachable, objects_marked);
1575- // gcstate->work_to_do -= objects_marked;
1576- // return;
1577- // }
1584+ if (gcstate -> phase == GC_PHASE_MARK ) {
1585+ Py_ssize_t objects_marked = mark_at_start (tstate );
1586+ GC_STAT_ADD (1 , objects_transitively_reachable , objects_marked );
1587+ gcstate -> work_to_do -= objects_marked ;
1588+ validate_spaces (gcstate );
1589+ return ;
1590+ }
15781591 PyGC_Head * not_visited = & gcstate -> old [gcstate -> visited_space ^1 ].head ;
15791592 PyGC_Head * visited = & gcstate -> old [gcstate -> visited_space ].head ;
15801593 PyGC_Head increment ;
@@ -1583,7 +1596,7 @@ gc_collect_increment(PyThreadState *tstate, struct gc_collection_stats *stats)
15831596 if (scale_factor < 2 ) {
15841597 scale_factor = 2 ;
15851598 }
1586- intptr_t objects_marked = 0 ; // mark_stacks(tstate->interp, visited, gcstate->visited_space, false);
1599+ intptr_t objects_marked = mark_stacks (tstate -> interp , visited , gcstate -> visited_space , false);
15871600 GC_STAT_ADD (1 , objects_transitively_reachable , objects_marked );
15881601 gcstate -> work_to_do -= objects_marked ;
15891602 gc_list_set_space (& gcstate -> young .head , gcstate -> visited_space );
@@ -1645,7 +1658,6 @@ gc_collect_full(PyThreadState *tstate,
16451658 gcstate -> old [0 ].count = 0 ;
16461659 gcstate -> old [1 ].count = 0 ;
16471660 completed_cycle (gcstate );
1648- gcstate -> work_to_do = - gcstate -> young .threshold * 2 ;
16491661 _PyGC_ClearAllFreeLists (tstate -> interp );
16501662 validate_spaces (gcstate );
16511663 add_stats (gcstate , 2 , stats );
0 commit comments