Skip to content

Commit 616be87

Browse files
author
Peter Zijlstra
committed
locking/rwbase: Extract __rwbase_write_trylock()
The code in rwbase_write_lock() is a little non-obvious compared to the read+set 'trylock'; extract that sequence into a helper function to clarify the code. This also provides a single site at which to fix the fast-path ordering.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 7687201 commit 616be87

File tree

1 file changed

+26
-18
lines changed

1 file changed

+26
-18
lines changed

kernel/locking/rwbase_rt.c

Lines changed: 26 additions & 18 deletions
Original file line number | Diff line number | Diff line change
@@ -196,6 +196,19 @@ static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
196196
__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
197197
}
198198

199+
static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
200+
{
201+
/* Can do without CAS because we're serialized by wait_lock. */
202+
lockdep_assert_held(&rwb->rtmutex.wait_lock);
203+
204+
if (!atomic_read(&rwb->readers)) {
205+
atomic_set(&rwb->readers, WRITER_BIAS);
206+
return 1;
207+
}
208+
209+
return 0;
210+
}
211+
199212
static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
200213
unsigned int state)
201214
{
@@ -210,34 +223,30 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
210223
atomic_sub(READER_BIAS, &rwb->readers);
211224

212225
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
213-
/*
214-
* set_current_state() for rw_semaphore
215-
* current_save_and_set_rtlock_wait_state() for rwlock
216-
*/
217-
rwbase_set_and_save_current_state(state);
226+
if (__rwbase_write_trylock(rwb))
227+
goto out_unlock;
218228

219-
/* Block until all readers have left the critical section. */
220-
for (; atomic_read(&rwb->readers);) {
229+
rwbase_set_and_save_current_state(state);
230+
for (;;) {
221231
/* Optimized out for rwlocks */
222232
if (rwbase_signal_pending_state(state, current)) {
223233
rwbase_restore_current_state();
224234
__rwbase_write_unlock(rwb, 0, flags);
225235
return -EINTR;
226236
}
237+
238+
if (__rwbase_write_trylock(rwb))
239+
break;
240+
227241
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
242+
rwbase_schedule();
243+
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
228244

229-
/*
230-
* Schedule and wait for the readers to leave the critical
231-
* section. The last reader leaving it wakes the waiter.
232-
*/
233-
if (atomic_read(&rwb->readers) != 0)
234-
rwbase_schedule();
235245
set_current_state(state);
236-
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
237246
}
238-
239-
atomic_set(&rwb->readers, WRITER_BIAS);
240247
rwbase_restore_current_state();
248+
249+
out_unlock:
241250
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
242251
return 0;
243252
}
@@ -253,8 +262,7 @@ static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
253262
atomic_sub(READER_BIAS, &rwb->readers);
254263

255264
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
256-
if (!atomic_read(&rwb->readers)) {
257-
atomic_set(&rwb->readers, WRITER_BIAS);
265+
if (__rwbase_write_trylock(rwb)) {
258266
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
259267
return 1;
260268
}

0 commit comments

Comments (0)