@@ -753,18 +753,20 @@ move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
  * and is much faster than a more complex approach that
  * would untrack all relevant tuples.
  */
-static void
+static Py_ssize_t
 untrack_tuples(PyGC_Head *head)
 {
+    Py_ssize_t untracked = 0;
     PyGC_Head *gc = GC_NEXT(head);
     while (gc != head) {
         PyObject *op = FROM_GC(gc);
         PyGC_Head *next = GC_NEXT(gc);
         if (PyTuple_CheckExact(op)) {
-            _PyTuple_MaybeUntrack(op);
+            untracked += _PyTuple_MaybeUntrack(op);
         }
         gc = next;
     }
+    return untracked;
 }

 /* Return true if object has a pre-PEP 442 finalization method. */
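What this hunk counts is CPython's existing optimization of untracking tuples whose elements can never take part in a reference cycle; `_PyTuple_MaybeUntrack` performs that check during a collection, and the change sums its result so each call site can report how many tuples were dropped from the GC lists. The underlying behaviour is observable from Python through the public `gc` API; a minimal sketch, assuming an ordinary CPython build (variable names are illustrative):

    import gc

    gc.disable()                  # keep automatic collections out of the picture

    t = tuple([1, 2.0, "three"])  # built at runtime, so it starts out GC-tracked
    print(gc.is_tracked(t))       # True

    gc.collect()                  # the collector sees it can never form a cycle
    print(gc.is_tracked(t))       # False: untrack_tuples() removed it

    u = tuple([[]])               # contains a mutable container
    gc.collect()
    print(gc.is_tracked(u))       # True: still tracked, it could be in a cycle

    gc.enable()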
@@ -1376,7 +1378,7 @@ gc_collect_young(PyThreadState *tstate,
     validate_spaces(gcstate);
     PyGC_Head *young = &gcstate->young.head;
     PyGC_Head *visited = &gcstate->old[gcstate->visited_space].head;
-    untrack_tuples(young);
+    stats->untracked_tuples += untrack_tuples(young);
     GC_STAT_ADD(0, collections, 1);

     PyGC_Head survivors;
@@ -1654,7 +1656,7 @@ gc_collect_increment(PyThreadState *tstate, struct gc_collection_stats *stats)
     GC_STAT_ADD(1, collections, 1);
     GCState *gcstate = &tstate->interp->gc;
     gcstate->work_to_do += assess_work_to_do(gcstate);
-    untrack_tuples(&gcstate->young.head);
+    stats->untracked_tuples += untrack_tuples(&gcstate->young.head);
     if (gcstate->phase == GC_PHASE_MARK) {
         Py_ssize_t objects_marked = mark_at_start(tstate);
         GC_STAT_ADD(1, objects_transitively_reachable, objects_marked);
@@ -1716,7 +1718,7 @@ gc_collect_full(PyThreadState *tstate,
     PyGC_Head *young = &gcstate->young.head;
     PyGC_Head *pending = &gcstate->old[gcstate->visited_space^1].head;
     PyGC_Head *visited = &gcstate->old[gcstate->visited_space].head;
-    untrack_tuples(young);
+    stats->untracked_tuples += untrack_tuples(young);
     /* merge all generations into visited */
     gc_list_merge(young, pending);
     gc_list_validate_space(pending, 1 - gcstate->visited_space);
@@ -1756,7 +1758,7 @@ gc_collect_region(PyThreadState *tstate,
     gc_list_init(&unreachable);
     deduce_unreachable(from, &unreachable);
     validate_consistent_old_space(from);
-    untrack_tuples(from);
+    stats->untracked_tuples += untrack_tuples(from);

     /* Move reachable objects to next generation. */
     validate_consistent_old_space(to);
@@ -2098,12 +2100,22 @@ _PyGC_Collect(PyThreadState *tstate, int generation, _PyGC_Reason reason)
     default:
         Py_UNREACHABLE();
     }
+    gcstate->generation_stats[generation].untracked_tuples += stats.untracked_tuples;
     if (PyDTrace_GC_DONE_ENABLED()) {
         PyDTrace_GC_DONE(stats.uncollectable + stats.collected);
     }
     if (reason != _Py_GC_REASON_SHUTDOWN) {
         invoke_gc_callback(gcstate, "stop", generation, &stats);
     }
+    else {
+        FILE *out = stderr;
+        for (int i = 0; i < NUM_GENERATIONS; i++) {
+            fprintf(out, "GC[%d] collections     : %zd\n", i, gcstate->generation_stats[i].collections);
+            fprintf(out, "GC[%d] collected       : %zd\n", i, gcstate->generation_stats[i].collected);
+            fprintf(out, "GC[%d] uncollectable   : %zd\n", i, gcstate->generation_stats[i].uncollectable);
+            fprintf(out, "GC[%d] untracked_tuples: %zd\n", i, gcstate->generation_stats[i].untracked_tuples);
+        }
+    }
     _PyErr_SetRaisedException(tstate, exc);
     GC_STAT_ADD(generation, objects_collected, stats.collected);
 #ifdef Py_STATS
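The final hunk folds the per-collection count into gcstate->generation_stats and, when the collection reason is _Py_GC_REASON_SHUTDOWN, dumps all generation counters to stderr. The existing per-generation counters (collections, collected, uncollectable) are already visible from Python via gc.get_stats(); a small sketch for inspecting that view, noting that an 'untracked_tuples' key there is an assumption, since this diff does not show any gc module glue:

    import gc

    # gc.get_stats() returns one dict per generation, mirroring
    # gcstate->generation_stats. Iterating the items keeps the sketch
    # working whether or not 'untracked_tuples' is exposed here.
    for gen, entry in enumerate(gc.get_stats()):
        print(f"GC[{gen}] " + " ".join(f"{k}={v}" for k, v in entry.items()))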