Skip to content

Commit 431f288

Browse files
committed
Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking fixes from Thomas Gleixner: "A set of locking fixes: - Address the fallout of the rwsem rework. Missing ACQUIREs and a sanity check to prevent a use-after-free - Add missing checks for uninitialized mutexes when mutex debugging is enabled. - Remove the bogus code in the generic SMP variant of arch_futex_atomic_op_inuser() - Fixup the #ifdeffery in lockdep to prevent compile warnings" * 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: locking/mutex: Test for initialized mutex locking/lockdep: Clean up #ifdef checks locking/lockdep: Hide unused 'class' variable locking/rwsem: Add ACQUIRE comments tty/ldsem, locking/rwsem: Add missing ACQUIRE to read_failed sleep loop locking/rwsem: Add missing ACQUIRE to read_slowpath sleep loop locking/rwsem: Add missing ACQUIRE to read_slowpath exit when queue is empty locking/rwsem: Don't call owner_on_cpu() on read-owner futex: Cleanup generic SMP variant of arch_futex_atomic_op_inuser()
2 parents 13fbe99 + 6c11c6e commit 431f288

File tree

6 files changed

+43
-38
lines changed

6 files changed

+43
-38
lines changed

drivers/tty/tty_ldsem.c

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -93,8 +93,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
9393

9494
list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
9595
tsk = waiter->task;
96-
smp_mb();
97-
waiter->task = NULL;
96+
smp_store_release(&waiter->task, NULL);
9897
wake_up_process(tsk);
9998
put_task_struct(tsk);
10099
}
@@ -194,7 +193,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
194193
for (;;) {
195194
set_current_state(TASK_UNINTERRUPTIBLE);
196195

197-
if (!waiter.task)
196+
if (!smp_load_acquire(&waiter.task))
198197
break;
199198
if (!timeout)
200199
break;

include/asm-generic/futex.h

Lines changed: 1 addition & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -118,26 +118,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
118118
static inline int
119119
arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
120120
{
121-
int oldval = 0, ret;
122-
123-
pagefault_disable();
124-
125-
switch (op) {
126-
case FUTEX_OP_SET:
127-
case FUTEX_OP_ADD:
128-
case FUTEX_OP_OR:
129-
case FUTEX_OP_ANDN:
130-
case FUTEX_OP_XOR:
131-
default:
132-
ret = -ENOSYS;
133-
}
134-
135-
pagefault_enable();
136-
137-
if (!ret)
138-
*oval = oldval;
139-
140-
return ret;
121+
return -ENOSYS;
141122
}
142123

143124
static inline int

kernel/locking/lockdep.c

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -448,7 +448,7 @@ static void print_lockdep_off(const char *bug_msg)
448448

449449
unsigned long nr_stack_trace_entries;
450450

451-
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
451+
#ifdef CONFIG_PROVE_LOCKING
452452
/*
453453
* Stack-trace: tightly packed array of stack backtrace
454454
* addresses. Protected by the graph_lock.
@@ -491,7 +491,7 @@ unsigned int max_lockdep_depth;
491491
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
492492
#endif
493493

494-
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
494+
#ifdef CONFIG_PROVE_LOCKING
495495
/*
496496
* Locking printouts:
497497
*/
@@ -2969,7 +2969,7 @@ static void check_chain_key(struct task_struct *curr)
29692969
#endif
29702970
}
29712971

2972-
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2972+
#ifdef CONFIG_PROVE_LOCKING
29732973
static int mark_lock(struct task_struct *curr, struct held_lock *this,
29742974
enum lock_usage_bit new_bit);
29752975

@@ -3608,7 +3608,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
36083608
return ret;
36093609
}
36103610

3611-
#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3611+
#else /* CONFIG_PROVE_LOCKING */
36123612

36133613
static inline int
36143614
mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
@@ -3627,7 +3627,7 @@ static inline int separate_irq_context(struct task_struct *curr,
36273627
return 0;
36283628
}
36293629

3630-
#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3630+
#endif /* CONFIG_PROVE_LOCKING */
36313631

36323632
/*
36333633
* Initialize a lock instance's lock-class mapping info:
@@ -4321,8 +4321,7 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie
43214321
*/
43224322
static void check_flags(unsigned long flags)
43234323
{
4324-
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
4325-
defined(CONFIG_TRACE_IRQFLAGS)
4324+
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
43264325
if (!debug_locks)
43274326
return;
43284327

kernel/locking/lockdep_proc.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -200,7 +200,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
200200

201201
static int lockdep_stats_show(struct seq_file *m, void *v)
202202
{
203-
struct lock_class *class;
204203
unsigned long nr_unused = 0, nr_uncategorized = 0,
205204
nr_irq_safe = 0, nr_irq_unsafe = 0,
206205
nr_softirq_safe = 0, nr_softirq_unsafe = 0,
@@ -211,6 +210,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
211210
sum_forward_deps = 0;
212211

213212
#ifdef CONFIG_PROVE_LOCKING
213+
struct lock_class *class;
214+
214215
list_for_each_entry(class, &all_lock_classes, lock_entry) {
215216

216217
if (class->usage_mask == 0)

kernel/locking/mutex.c

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -908,6 +908,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
908908

909909
might_sleep();
910910

911+
#ifdef CONFIG_DEBUG_MUTEXES
912+
DEBUG_LOCKS_WARN_ON(lock->magic != lock);
913+
#endif
914+
911915
ww = container_of(lock, struct ww_mutex, base);
912916
if (use_ww_ctx && ww_ctx) {
913917
if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
@@ -1379,8 +1383,13 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
13791383
*/
13801384
int __sched mutex_trylock(struct mutex *lock)
13811385
{
1382-
bool locked = __mutex_trylock(lock);
1386+
bool locked;
1387+
1388+
#ifdef CONFIG_DEBUG_MUTEXES
1389+
DEBUG_LOCKS_WARN_ON(lock->magic != lock);
1390+
#endif
13831391

1392+
locked = __mutex_trylock(lock);
13841393
if (locked)
13851394
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
13861395

kernel/locking/rwsem.c

Lines changed: 22 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -666,7 +666,11 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
666666
preempt_disable();
667667
rcu_read_lock();
668668
owner = rwsem_owner_flags(sem, &flags);
669-
if ((flags & nonspinnable) || (owner && !owner_on_cpu(owner)))
669+
/*
670+
* Don't check the read-owner as the entry may be stale.
671+
*/
672+
if ((flags & nonspinnable) ||
673+
(owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
670674
ret = false;
671675
rcu_read_unlock();
672676
preempt_enable();
@@ -1000,6 +1004,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
10001004
atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
10011005
adjustment = 0;
10021006
if (rwsem_optimistic_spin(sem, false)) {
1007+
/* rwsem_optimistic_spin() implies ACQUIRE on success */
10031008
/*
10041009
* Wake up other readers in the wait list if the front
10051010
* waiter is a reader.
@@ -1014,6 +1019,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
10141019
}
10151020
return sem;
10161021
} else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
1022+
/* rwsem_reader_phase_trylock() implies ACQUIRE on success */
10171023
return sem;
10181024
}
10191025

@@ -1032,6 +1038,8 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
10321038
*/
10331039
if (adjustment && !(atomic_long_read(&sem->count) &
10341040
(RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
1041+
/* Provide lock ACQUIRE */
1042+
smp_acquire__after_ctrl_dep();
10351043
raw_spin_unlock_irq(&sem->wait_lock);
10361044
rwsem_set_reader_owned(sem);
10371045
lockevent_inc(rwsem_rlock_fast);
@@ -1065,15 +1073,18 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
10651073
wake_up_q(&wake_q);
10661074

10671075
/* wait to be given the lock */
1068-
while (true) {
1076+
for (;;) {
10691077
set_current_state(state);
1070-
if (!waiter.task)
1078+
if (!smp_load_acquire(&waiter.task)) {
1079+
/* Matches rwsem_mark_wake()'s smp_store_release(). */
10711080
break;
1081+
}
10721082
if (signal_pending_state(state, current)) {
10731083
raw_spin_lock_irq(&sem->wait_lock);
10741084
if (waiter.task)
10751085
goto out_nolock;
10761086
raw_spin_unlock_irq(&sem->wait_lock);
1087+
/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
10771088
break;
10781089
}
10791090
schedule();
@@ -1083,6 +1094,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
10831094
__set_current_state(TASK_RUNNING);
10841095
lockevent_inc(rwsem_rlock);
10851096
return sem;
1097+
10861098
out_nolock:
10871099
list_del(&waiter.list);
10881100
if (list_empty(&sem->wait_list)) {
@@ -1123,8 +1135,10 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
11231135

11241136
/* do optimistic spinning and steal lock if possible */
11251137
if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
1126-
rwsem_optimistic_spin(sem, true))
1138+
rwsem_optimistic_spin(sem, true)) {
1139+
/* rwsem_optimistic_spin() implies ACQUIRE on success */
11271140
return sem;
1141+
}
11281142

11291143
/*
11301144
* Disable reader optimistic spinning for this rwsem after
@@ -1184,9 +1198,11 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
11841198
wait:
11851199
/* wait until we successfully acquire the lock */
11861200
set_current_state(state);
1187-
while (true) {
1188-
if (rwsem_try_write_lock(sem, wstate))
1201+
for (;;) {
1202+
if (rwsem_try_write_lock(sem, wstate)) {
1203+
/* rwsem_try_write_lock() implies ACQUIRE on success */
11891204
break;
1205+
}
11901206

11911207
raw_spin_unlock_irq(&sem->wait_lock);
11921208

0 commit comments

Comments
 (0)