@@ -1238,15 +1238,15 @@ work_queue_first(struct llist_node *head)
 }
 
 static void
-process_queue(struct llist_node *head, struct _qsbr_thread_state *qsbr,
+process_queue(struct llist_node *head, _PyThreadStateImpl *tstate,
               bool keep_empty, delayed_dealloc_cb cb, void *state)
 {
     while (!llist_empty(head)) {
         struct _mem_work_chunk *buf = work_queue_first(head);
 
         if (buf->rd_idx < buf->wr_idx) {
             struct _mem_work_item *item = &buf->array[buf->rd_idx];
-            if (!_Py_qsbr_poll(qsbr, item->qsbr_goal)) {
+            if (!_Py_qsbr_poll(tstate->qsbr, item->qsbr_goal)) {
                 return;
             }
 
@@ -1270,19 +1270,19 @@ process_queue(struct llist_node *head, struct _qsbr_thread_state *qsbr,
 
 static void
 process_interp_queue(struct _Py_mem_interp_free_queue *queue,
-                     struct _qsbr_thread_state *qsbr, delayed_dealloc_cb cb,
+                     _PyThreadStateImpl *tstate, delayed_dealloc_cb cb,
                      void *state)
 {
     assert(PyMutex_IsLocked(&queue->mutex));
-    process_queue(&queue->head, qsbr, false, cb, state);
+    process_queue(&queue->head, tstate, false, cb, state);
 
     int more_work = !llist_empty(&queue->head);
     _Py_atomic_store_int_relaxed(&queue->has_work, more_work);
 }
 
 static void
 maybe_process_interp_queue(struct _Py_mem_interp_free_queue *queue,
-                           struct _qsbr_thread_state *qsbr, delayed_dealloc_cb cb,
+                           _PyThreadStateImpl *tstate, delayed_dealloc_cb cb,
                            void *state)
 {
     if (!_Py_atomic_load_int_relaxed(&queue->has_work)) {
@@ -1291,7 +1291,7 @@ maybe_process_interp_queue(struct _Py_mem_interp_free_queue *queue,
 
     // Try to acquire the lock, but don't block if it's already held.
     if (_PyMutex_LockTimed(&queue->mutex, 0, 0) == PY_LOCK_ACQUIRED) {
-        process_interp_queue(queue, qsbr, cb, state);
+        process_interp_queue(queue, tstate, cb, state);
         PyMutex_Unlock(&queue->mutex);
     }
 }
@@ -1303,10 +1303,10 @@ _PyMem_ProcessDelayed(PyThreadState *tstate)
     _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
 
     // Process thread-local work
-    process_queue(&tstate_impl->mem_free_queue, tstate_impl->qsbr, true, NULL, NULL);
+    process_queue(&tstate_impl->mem_free_queue, tstate_impl, true, NULL, NULL);
 
     // Process shared interpreter work
-    maybe_process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr, NULL, NULL);
+    maybe_process_interp_queue(&interp->mem_free_queue, tstate_impl, NULL, NULL);
 }
 
 void
@@ -1316,10 +1316,10 @@ _PyMem_ProcessDelayedNoDealloc(PyThreadState *tstate, delayed_dealloc_cb cb, voi
     _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
 
     // Process thread-local work
-    process_queue(&tstate_impl->mem_free_queue, tstate_impl->qsbr, true, cb, state);
+    process_queue(&tstate_impl->mem_free_queue, tstate_impl, true, cb, state);
 
     // Process shared interpreter work
-    maybe_process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr, cb, state);
+    maybe_process_interp_queue(&interp->mem_free_queue, tstate_impl, cb, state);
 }
 
 void
@@ -1348,7 +1348,7 @@ _PyMem_AbandonDelayed(PyThreadState *tstate)
 
     // Process the merged queue now (see gh-130794).
     _PyThreadStateImpl *this_tstate = (_PyThreadStateImpl *)_PyThreadState_GET();
-    process_interp_queue(&interp->mem_free_queue, this_tstate->qsbr, NULL, NULL);
+    process_interp_queue(&interp->mem_free_queue, this_tstate, NULL, NULL);
 
     PyMutex_Unlock(&interp->mem_free_queue.mutex);
 
0 commit comments