@@ -109,18 +109,8 @@ _PyCriticalSection_IsActive(uintptr_t tag)
 static inline void
 _PyCriticalSection_BeginMutex(PyCriticalSection *c, PyMutex *m)
 {
-    // As an optimisation for locking the same object recursively, skip
-    // locking if the mutex is currently locked and the topmost, active,
-    // single critical section.
-    PyThreadState *tstate = _PyThreadState_GET();
-    if (tstate->critical_section &&
-        !(tstate->critical_section & _Py_CRITICAL_SECTION_MASK) &&
-        ((PyCriticalSection *)tstate->critical_section)->_cs_mutex == m) {
-        c->_cs_mutex = NULL;
-        c->_cs_prev = 0;
-        return;
-    }
     if (PyMutex_LockFast(m)) {
+        PyThreadState *tstate = _PyThreadState_GET();
         c->_cs_mutex = m;
         c->_cs_prev = tstate->critical_section;
         tstate->critical_section = (uintptr_t)c;
@@ -156,7 +146,7 @@ static inline void
 _PyCriticalSection_End(PyCriticalSection *c)
 {
     // If the mutex is NULL, we used the fast path in
-    // _PyCriticalSection_BeginMutex for locks already held in the top-most
+    // _PyCriticalSection_BeginSlow for locks already held in the top-most
     // critical section, and we shouldn't unlock or pop this critical section.
     if (c->_cs_mutex == NULL) {
         return;
@@ -216,7 +206,7 @@ static inline void
 _PyCriticalSection2_End(PyCriticalSection2 *c)
 {
     // if mutex1 is NULL, we used the fast path in
-    // _PyCriticalSection_BeginMutex for mutexes that are already held,
+    // _PyCriticalSection_BeginSlow for mutexes that are already held,
     // which should only happen when mutex1 and mutex2 were the same mutex,
     // and mutex2 should also be NULL.
     if (c->_cs_base._cs_mutex == NULL) {
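For context, a minimal sketch of how these helpers pair up after this change. Only `PyCriticalSection`, `PyMutex`, `_PyCriticalSection_BeginMutex`, and `_PyCriticalSection_End` come from the diff above; the caller itself is hypothetical.

```c
// Hypothetical caller, for illustration only (not part of the patch).
// After this change, _PyCriticalSection_BeginMutex always acquires `m`,
// either via PyMutex_LockFast or by falling back to
// _PyCriticalSection_BeginSlow; per the updated comments, a NULL
// _cs_mutex can now only be produced by the slow path.
static void
with_mutex_held(PyCriticalSection *cs, PyMutex *m)
{
    _PyCriticalSection_BeginMutex(cs, m);
    // ... operate on the state guarded by `m` ...
    _PyCriticalSection_End(cs);  // no-op when cs->_cs_mutex == NULL
}
```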