Commit ecd12ac

Move the newly added fast path to the start of the slow path, since it
should be relatively rare and we don't want to burden the fastest path.
1 parent ddb794a

2 files changed: +20 -20 lines
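
For orientation, a hedged sketch of the recursive-locking case the moved fast path serves. Py_BEGIN_CRITICAL_SECTION and Py_END_CRITICAL_SECTION are CPython's critical-section macros; the nested-call pattern and the visit_twice helper are illustrative assumptions, not code from this commit.

#include "Python.h"

/* Illustrative only: nested critical sections on the same object in a
 * free-threaded (Py_GIL_DISABLED) build. */
static void
visit_twice(PyObject *op)
{
    Py_BEGIN_CRITICAL_SECTION(op);   // outer: locks op's per-object mutex
    Py_BEGIN_CRITICAL_SECTION(op);   // inner: PyMutex_LockFast() fails since
                                     // the mutex is already held, so control
                                     // reaches _PyCriticalSection_BeginSlow,
                                     // which now spots the recursion and
                                     // records a no-op section instead of
                                     // blocking on a lock this thread holds
    // ... work on op while it is locked ...
    Py_END_CRITICAL_SECTION();       // inner: _cs_mutex == NULL, early return
    Py_END_CRITICAL_SECTION();       // outer: pops the section and unlocks op
}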

Include/internal/pycore_critical_section.h

Lines changed: 3 additions & 13 deletions
@@ -109,18 +109,8 @@ _PyCriticalSection_IsActive(uintptr_t tag)
 static inline void
 _PyCriticalSection_BeginMutex(PyCriticalSection *c, PyMutex *m)
 {
-    // As an optimisation for locking the same object recursively, skip
-    // locking if the mutex is currently locked and the topmost, active,
-    // single critical section.
-    PyThreadState *tstate = _PyThreadState_GET();
-    if (tstate->critical_section &&
-        !(tstate->critical_section & _Py_CRITICAL_SECTION_MASK) &&
-        ((PyCriticalSection *)tstate->critical_section)->_cs_mutex == m) {
-        c->_cs_mutex = NULL;
-        c->_cs_prev = 0;
-        return;
-    }
     if (PyMutex_LockFast(m)) {
+        PyThreadState *tstate = _PyThreadState_GET();
         c->_cs_mutex = m;
         c->_cs_prev = tstate->critical_section;
         tstate->critical_section = (uintptr_t)c;
@@ -156,7 +146,7 @@ static inline void
 _PyCriticalSection_End(PyCriticalSection *c)
 {
     // If the mutex is NULL, we used the fast path in
-    // _PyCriticalSection_BeginMutex for locks already held in the top-most
+    // _PyCriticalSection_BeginSlow for locks already held in the top-most
     // critical section, and we shouldn't unlock or pop this critical section.
     if (c->_cs_mutex == NULL) {
         return;
@@ -216,7 +206,7 @@ static inline void
 _PyCriticalSection2_End(PyCriticalSection2 *c)
 {
     // if mutex1 is NULL, we used the fast path in
-    // _PyCriticalSection_BeginMutex for mutexes that are already held,
+    // _PyCriticalSection_BeginSlow for mutexes that are already held,
     // which should only happen when mutex1 and mutex2 were the same mutex,
     // and mutex2 should also be NULL.
     if (c->_cs_base._cs_mutex == NULL) {
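
The End functions rely on a NULL-mutex sentinel: when Begin detects that the top-most critical section already holds the requested mutex, it records a section with _cs_mutex == NULL, and End returns early without unlocking or popping anything. A minimal standalone sketch of that pairing, using simplified stand-in names (Mutex, Section, begin, end, top) rather than the CPython internals above:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct { bool locked; } Mutex;
typedef struct {
    Mutex *mutex;      /* NULL => recursive no-op section             */
    uintptr_t prev;    /* previously top-most section                 */
} Section;

static uintptr_t top;  /* stand-in for tstate->critical_section      */

static void
begin(Section *s, Mutex *m)
{
    /* Recursive fast path: the top-most section already holds m. */
    if (top && ((Section *)top)->mutex == m) {
        s->mutex = NULL;   /* sentinel: end() will do nothing         */
        s->prev = 0;
        return;
    }
    m->locked = true;      /* simplified: a real mutex lock goes here */
    s->mutex = m;
    s->prev = top;
    top = (uintptr_t)s;
}

static void
end(Section *s)
{
    if (s->mutex == NULL) {
        return;            /* no-op section: don't unlock, don't pop  */
    }
    s->mutex->locked = false;
    top = s->prev;
}

int main(void)
{
    Mutex m = { false };
    Section outer, inner;
    begin(&outer, &m);     /* locks m                                 */
    begin(&inner, &m);     /* no-op: m held by the top-most section   */
    end(&inner);           /* early return, m stays locked            */
    end(&outer);           /* unlocks m                               */
    return m.locked ? 1 : 0;   /* exits 0: everything unwound         */
}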

Python/critical_section.c

Lines changed: 17 additions & 7 deletions
@@ -8,11 +8,28 @@ static_assert(_Alignof(PyCriticalSection) >= 4,
                "critical section must be aligned to at least 4 bytes");
 #endif
 
+#ifdef Py_GIL_DISABLED
+static PyCriticalSection *
+untag_critical_section(uintptr_t tag)
+{
+    return (PyCriticalSection *)(tag & ~_Py_CRITICAL_SECTION_MASK);
+}
+#endif
+
 void
 _PyCriticalSection_BeginSlow(PyCriticalSection *c, PyMutex *m)
 {
 #ifdef Py_GIL_DISABLED
     PyThreadState *tstate = _PyThreadState_GET();
+    // As an optimisation for locking the same object recursively, skip
+    // locking if the mutex is currently locked by the top-most critical
+    // section.
+    if (tstate->critical_section &&
+        untag_critical_section(tstate->critical_section)->_cs_mutex == m) {
+        c->_cs_mutex = NULL;
+        c->_cs_prev = 0;
+        return;
+    }
     c->_cs_mutex = NULL;
     c->_cs_prev = (uintptr_t)tstate->critical_section;
     tstate->critical_section = (uintptr_t)c;
@@ -42,13 +59,6 @@ _PyCriticalSection2_BeginSlow(PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2,
 #endif
 }
 
-#ifdef Py_GIL_DISABLED
-static PyCriticalSection *
-untag_critical_section(uintptr_t tag)
-{
-    return (PyCriticalSection *)(tag & ~_Py_CRITICAL_SECTION_MASK);
-}
-#endif
 
 // Release all locks held by critical sections. This is called by
 // _PyThreadState_Detach.
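
One detail worth noting: tstate->critical_section is a tagged pointer. Because PyCriticalSection is aligned to at least 4 bytes (the static_assert at the top of this file), the two low bits are free to carry flags, and untag_critical_section() masks them off with _Py_CRITICAL_SECTION_MASK before dereferencing. A standalone sketch of the scheme; the flag name and values below are local stand-ins, not CPython's definitions:

#include <assert.h>
#include <stdint.h>

#define CS_INACTIVE ((uintptr_t)0x1)  /* example flag bit        */
#define CS_MASK     ((uintptr_t)0x3)  /* low two bits reserved   */

typedef struct { void *mutex; } FakeSection;

/* Mask off the flag bits to recover the real pointer, mirroring
 * untag_critical_section() in the diff above. */
static FakeSection *
untag(uintptr_t tag)
{
    return (FakeSection *)(tag & ~CS_MASK);
}

int main(void)
{
    /* Alignment guarantees the low bits of &cs are zero, so they
     * can be borrowed for flags and stripped without loss. */
    _Alignas(4) FakeSection cs = { 0 };
    uintptr_t tagged = (uintptr_t)&cs | CS_INACTIVE;
    assert(untag(tagged) == &cs);
    return 0;
}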
