Commit 6ffddfb

Peter Zijlstra authored and Ingo Molnar committed
locking/rwsem: Add ACQUIRE comments
Since we just reviewed read_slowpath for ACQUIRE correctness, add a few comments to retain our findings.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Will Deacon <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 952041a commit 6ffddfb
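
The core of the reviewed ordering is the hand-off in the reader's wait loop: rwsem_mark_wake() transfers the lock and then clears waiter->task with smp_store_release(), while the sleeping reader polls it with smp_load_acquire(), so everything the waker did before the grant is visible once the loop breaks. The following is a minimal standalone sketch of that pattern, using C11 atomics in place of the kernel's smp_store_release()/smp_load_acquire(); the types and function names are illustrative, not kernel code.

#include <stdatomic.h>
#include <stddef.h>

struct task;                            /* opaque stand-in for task_struct */

struct waiter {
        _Atomic(struct task *) task;    /* non-NULL while still waiting */
};

/* Waker side, as rwsem_mark_wake() does conceptually: transfer the
 * lock first, then publish the grant with RELEASE ordering. */
static void grant_lock(struct waiter *w)
{
        /* ... hand lock ownership to the waiter here ... */
        atomic_store_explicit(&w->task, NULL, memory_order_release);
}

/* Waiter side, mirroring the wait loop in the diff below: the ACQUIRE
 * load pairs with the RELEASE store in grant_lock(), so the critical
 * section cannot be reordered before the observed grant. The real
 * code sleeps via schedule() instead of spinning. */
static void wait_for_lock(struct waiter *w)
{
        while (atomic_load_explicit(&w->task, memory_order_acquire))
                ;                       /* lock not granted yet */
        /* safe to enter the critical section from here on */
}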

kernel/locking/rwsem.c

Lines changed: 13 additions & 5 deletions
@@ -1004,6 +1004,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
         atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
         adjustment = 0;
         if (rwsem_optimistic_spin(sem, false)) {
+                /* rwsem_optimistic_spin() implies ACQUIRE on success */
                 /*
                  * Wake up other readers in the wait list if the front
                  * waiter is a reader.
@@ -1018,6 +1019,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
                 }
                 return sem;
         } else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
+                /* rwsem_reader_phase_trylock() implies ACQUIRE on success */
                 return sem;
         }
 
@@ -1071,17 +1073,18 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
         wake_up_q(&wake_q);
 
         /* wait to be given the lock */
-        while (true) {
+        for (;;) {
                 set_current_state(state);
                 if (!smp_load_acquire(&waiter.task)) {
-                        /* Orders against rwsem_mark_wake()'s smp_store_release() */
+                        /* Matches rwsem_mark_wake()'s smp_store_release(). */
                         break;
                 }
                 if (signal_pending_state(state, current)) {
                         raw_spin_lock_irq(&sem->wait_lock);
                         if (waiter.task)
                                 goto out_nolock;
                         raw_spin_unlock_irq(&sem->wait_lock);
+                        /* Ordered by sem->wait_lock against rwsem_mark_wake(). */
                         break;
                 }
                 schedule();
@@ -1091,6 +1094,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
         __set_current_state(TASK_RUNNING);
         lockevent_inc(rwsem_rlock);
         return sem;
+
 out_nolock:
         list_del(&waiter.list);
         if (list_empty(&sem->wait_list)) {
@@ -1131,8 +1135,10 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 
         /* do optimistic spinning and steal lock if possible */
         if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
-            rwsem_optimistic_spin(sem, true))
+            rwsem_optimistic_spin(sem, true)) {
+                /* rwsem_optimistic_spin() implies ACQUIRE on success */
                 return sem;
+        }
 
         /*
          * Disable reader optimistic spinning for this rwsem after
@@ -1192,9 +1198,11 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 wait:
         /* wait until we successfully acquire the lock */
         set_current_state(state);
-        while (true) {
-                if (rwsem_try_write_lock(sem, wstate))
+        for (;;) {
+                if (rwsem_try_write_lock(sem, wstate)) {
+                        /* rwsem_try_write_lock() implies ACQUIRE on success */
                         break;
+                }
 
                 raw_spin_unlock_irq(&sem->wait_lock);

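All three new "implies ACQUIRE on success" comments record the same property: each of these paths claims sem->count with an atomic operation that provides at least ACQUIRE ordering on success, so the critical section cannot be reordered before the acquisition and no extra barrier is needed before returning with the lock held. A standalone sketch of a trylock with that shape, again in C11 atomics (the 0/1 lock-word encoding is a stand-in, not rwsem's actual count layout):

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical lock word: 0 = free, 1 = write-locked. */
static bool try_write_lock(atomic_long *count)
{
        long free = 0;

        /*
         * ACQUIRE ordering on success only: if the CAS claims the
         * lock, later loads and stores are ordered after it. A failed
         * attempt needs no ordering, hence relaxed.
         */
        return atomic_compare_exchange_strong_explicit(count, &free, 1L,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
}

On failure the write slowpath drops sem->wait_lock and sleeps before retrying, as the final hunk above shows.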