@@ -1303,18 +1303,26 @@ static void
 process_interp_queue(struct _Py_mem_interp_free_queue *queue,
                      struct _qsbr_thread_state *qsbr, delayed_dealloc_cb cb,
                      void *state)
+{
+    assert(PyMutex_IsLocked(&queue->mutex));
+    process_queue(&queue->head, qsbr, false, cb, state);
+
+    int more_work = !llist_empty(&queue->head);
+    _Py_atomic_store_int_relaxed(&queue->has_work, more_work);
+}
+
+static void
+maybe_process_interp_queue(struct _Py_mem_interp_free_queue *queue,
+                           struct _qsbr_thread_state *qsbr, delayed_dealloc_cb cb,
+                           void *state)
 {
     if (!_Py_atomic_load_int_relaxed(&queue->has_work)) {
         return;
     }
 
     // Try to acquire the lock, but don't block if it's already held.
     if (_PyMutex_LockTimed(&queue->mutex, 0, 0) == PY_LOCK_ACQUIRED) {
-        process_queue(&queue->head, qsbr, false, cb, state);
-
-        int more_work = !llist_empty(&queue->head);
-        _Py_atomic_store_int_relaxed(&queue->has_work, more_work);
-
+        process_interp_queue(queue, qsbr, cb, state);
         PyMutex_Unlock(&queue->mutex);
     }
 }
@@ -1329,7 +1337,7 @@ _PyMem_ProcessDelayed(PyThreadState *tstate)
     process_queue(&tstate_impl->mem_free_queue, tstate_impl->qsbr, true, NULL, NULL);
 
     // Process shared interpreter work
-    process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr, NULL, NULL);
+    maybe_process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr, NULL, NULL);
 }
 
 void
@@ -1342,7 +1350,7 @@ _PyMem_ProcessDelayedNoDealloc(PyThreadState *tstate, delayed_dealloc_cb cb, voi
     process_queue(&tstate_impl->mem_free_queue, tstate_impl->qsbr, true, cb, state);
 
     // Process shared interpreter work
-    process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr, cb, state);
+    maybe_process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr, cb, state);
 }
 
 void
@@ -1364,10 +1372,15 @@ _PyMem_AbandonDelayed(PyThreadState *tstate)
         return;
     }
 
-    // Merge the thread's work queue into the interpreter's work queue.
     PyMutex_Lock(&interp->mem_free_queue.mutex);
+
+    // Merge the thread's work queue into the interpreter's work queue.
     llist_concat(&interp->mem_free_queue.head, queue);
-    _Py_atomic_store_int_relaxed(&interp->mem_free_queue.has_work, 1);
+
+    // Process the merged queue now (see gh-130794).
+    _PyThreadStateImpl *this_tstate = (_PyThreadStateImpl *)tstate;
+    process_interp_queue(&interp->mem_free_queue, this_tstate->qsbr, NULL, NULL);
+
     PyMutex_Unlock(&interp->mem_free_queue.mutex);
 
     assert(llist_empty(queue));  // the thread's queue is now empty