
Commit 732a27a

Merge tag 'locking-urgent-2021-05-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Thomas Gleixner:
 "A set of locking related fixes and updates:

  - Two fixes for the futex syscall related to timeout handling.

    FUTEX_LOCK_PI does not support the FUTEX_CLOCK_REALTIME bit, and
    because that bit is not set, the time namespace adjustment for
    CLOCK_MONOTONIC was wrongly applied to its timeout.

    FUTEX_WAIT cannot support the FUTEX_CLOCK_REALTIME bit because its
    timeout is always relative.

  - Cleanups in the futex syscall entry points, which became obvious
    once the two timeout handling bugs were fixed.

  - Cleanup of queued_write_lock_slowpath() as suggested by Linus.

  - Fixup of the smp_call_function_single_async() prototype"

* tag 'locking-urgent-2021-05-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  futex: Make syscall entry points less convoluted
  futex: Get rid of the val2 conditional dance
  futex: Do not apply time namespace adjustment on FUTEX_LOCK_PI
  Revert 337f130 ("futex: Allow FUTEX_CLOCK_REALTIME with FUTEX_WAIT op")
  locking/qrwlock: Cleanup queued_write_lock_slowpath()
  smp: Fix smp_call_function_single_async prototype

2 parents 85bbba1 + 51cf94d

5 files changed: +58 -60 lines

include/linux/smp.h

Lines changed: 1 addition & 1 deletion

@@ -53,7 +53,7 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 			   void *info, bool wait, const struct cpumask *mask);
 
-int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
 
 /*
  * Cpus stopping functions in panic. All have default weak definitions.
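
Background for this one-line change: call_single_data_t is the kernel's alias of struct __call_single_data with the alignment forced up to the structure's size, so a prototype taking the alias promises the compiler more alignment than callers can deliver when the struct sits naturally aligned inside a larger object. A minimal standalone sketch of the pattern — the __mock_csd names and field layout are illustrative stand-ins, not the kernel's definitions:

#include <stdalign.h>
#include <stdio.h>

struct __mock_csd {		/* stand-in for struct __call_single_data */
	void *node_next;
	void *node_src;
	void (*func)(void *);
	void *info;
};

/* Over-aligned alias, mirroring the kernel's call_single_data_t pattern. */
typedef struct __mock_csd mock_csd_t
	__attribute__((aligned(sizeof(struct __mock_csd))));

/* Taking the base struct type accepts any struct __mock_csd *,
 * whether or not the object carries the alias's stricter alignment. */
static void use_csd(struct __mock_csd *csd) { (void)csd; }

struct container {
	char pad;
	struct __mock_csd csd;	/* embedded, only naturally aligned */
};

int main(void)
{
	struct container c;
	mock_csd_t aligned_csd;

	printf("alignof(struct __mock_csd) = %zu\n", alignof(struct __mock_csd));
	printf("alignof(mock_csd_t)        = %zu\n", alignof(mock_csd_t));

	use_csd(&c.csd);	/* fine: no over-alignment is promised */
	use_csd(&aligned_csd);	/* also fine */
	return 0;
}

On an LP64 build this prints 8 and 32; declaring the parameter with the base struct type keeps the function honest about what alignment it may assume.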

kernel/futex.c

Lines changed: 40 additions & 42 deletions

@@ -3710,8 +3710,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 
 	if (op & FUTEX_CLOCK_REALTIME) {
 		flags |= FLAGS_CLOCKRT;
-		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \
-		    cmd != FUTEX_WAIT_REQUEUE_PI)
+		if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
 			return -ENOSYS;
 	}
 
@@ -3758,42 +3757,52 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 		return -ENOSYS;
 	}
 
+static __always_inline bool futex_cmd_has_timeout(u32 cmd)
+{
+	switch (cmd) {
+	case FUTEX_WAIT:
+	case FUTEX_LOCK_PI:
+	case FUTEX_WAIT_BITSET:
+	case FUTEX_WAIT_REQUEUE_PI:
+		return true;
+	}
+	return false;
+}
+
+static __always_inline int
+futex_init_timeout(u32 cmd, u32 op, struct timespec64 *ts, ktime_t *t)
+{
+	if (!timespec64_valid(ts))
+		return -EINVAL;
+
+	*t = timespec64_to_ktime(*ts);
+	if (cmd == FUTEX_WAIT)
+		*t = ktime_add_safe(ktime_get(), *t);
+	else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
+		*t = timens_ktime_to_host(CLOCK_MONOTONIC, *t);
+	return 0;
+}
 
 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
 		const struct __kernel_timespec __user *, utime,
 		u32 __user *, uaddr2, u32, val3)
 {
-	struct timespec64 ts;
+	int ret, cmd = op & FUTEX_CMD_MASK;
 	ktime_t t, *tp = NULL;
-	u32 val2 = 0;
-	int cmd = op & FUTEX_CMD_MASK;
+	struct timespec64 ts;
 
-	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
-		      cmd == FUTEX_WAIT_BITSET ||
-		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
+	if (utime && futex_cmd_has_timeout(cmd)) {
 		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
 			return -EFAULT;
 		if (get_timespec64(&ts, utime))
 			return -EFAULT;
-		if (!timespec64_valid(&ts))
-			return -EINVAL;
-
-		t = timespec64_to_ktime(ts);
-		if (cmd == FUTEX_WAIT)
-			t = ktime_add_safe(ktime_get(), t);
-		else if (!(op & FUTEX_CLOCK_REALTIME))
-			t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
+		ret = futex_init_timeout(cmd, op, &ts, &t);
+		if (ret)
+			return ret;
 		tp = &t;
 	}
-	/*
-	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
-	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
-	 */
-	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
-	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
-		val2 = (u32) (unsigned long) utime;
 
-	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
+	return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3);
 }
 
 #ifdef CONFIG_COMPAT
@@ -3959,31 +3968,20 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
 		const struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
 		u32, val3)
 {
-	struct timespec64 ts;
+	int ret, cmd = op & FUTEX_CMD_MASK;
 	ktime_t t, *tp = NULL;
-	int val2 = 0;
-	int cmd = op & FUTEX_CMD_MASK;
+	struct timespec64 ts;
 
-	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
-		      cmd == FUTEX_WAIT_BITSET ||
-		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
+	if (utime && futex_cmd_has_timeout(cmd)) {
 		if (get_old_timespec32(&ts, utime))
 			return -EFAULT;
-		if (!timespec64_valid(&ts))
-			return -EINVAL;
-
-		t = timespec64_to_ktime(ts);
-		if (cmd == FUTEX_WAIT)
-			t = ktime_add_safe(ktime_get(), t);
-		else if (!(op & FUTEX_CLOCK_REALTIME))
-			t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
+		ret = futex_init_timeout(cmd, op, &ts, &t);
+		if (ret)
+			return ret;
 		tp = &t;
 	}
-	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
-	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
-		val2 = (int) (unsigned long) utime;
 
-	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
+	return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3);
 }
 #endif /* CONFIG_COMPAT_32BIT_TIME */
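
The user-visible effect of the futex timeout fixes: FUTEX_WAIT keeps its historical relative timeout, and therefore rejects FUTEX_CLOCK_REALTIME again after the revert, while FUTEX_WAIT_BITSET takes an absolute deadline and accepts the flag. A minimal sketch for 64-bit Linux with these fixes applied — raw syscall(2) because glibc provides no futex wrapper; error handling trimmed:

#define _GNU_SOURCE
#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static int futex(uint32_t *uaddr, int op, uint32_t val,
		 const struct timespec *timeout, uint32_t val3)
{
	return syscall(SYS_futex, uaddr, op, val, timeout, NULL, val3);
}

int main(void)
{
	uint32_t word = 0;

	/* FUTEX_WAIT: wait at most 100ms (relative) for *uaddr != 0. */
	struct timespec rel = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
	if (futex(&word, FUTEX_WAIT, 0, &rel, 0) < 0 && errno == ETIMEDOUT)
		puts("FUTEX_WAIT: relative timeout expired");

	/* FUTEX_WAIT_BITSET: absolute CLOCK_REALTIME deadline. */
	struct timespec abs;
	clock_gettime(CLOCK_REALTIME, &abs);
	abs.tv_sec += 1;
	if (futex(&word, FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, 0, &abs,
		  FUTEX_BITSET_MATCH_ANY) < 0 && errno == ETIMEDOUT)
		puts("FUTEX_WAIT_BITSET: absolute deadline passed");

	/* Post-revert, FUTEX_WAIT | FUTEX_CLOCK_REALTIME fails with ENOSYS. */
	if (futex(&word, FUTEX_WAIT | FUTEX_CLOCK_REALTIME, 0, &rel, 0) < 0 &&
	    errno == ENOSYS)
		puts("FUTEX_WAIT rejects FUTEX_CLOCK_REALTIME");
	return 0;
}

FUTEX_LOCK_PI is the third case: its timeout is always an absolute CLOCK_REALTIME value even without the flag, which is why futex_init_timeout() above exempts it from the CLOCK_MONOTONIC time namespace adjustment.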

kernel/locking/qrwlock.c

Lines changed: 3 additions & 3 deletions

@@ -66,12 +66,12 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 	arch_spin_lock(&lock->wait_lock);
 
 	/* Try to acquire the lock directly if no reader is present */
-	if (!atomic_read(&lock->cnts) &&
-	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
+	if (!(cnts = atomic_read(&lock->cnts)) &&
+	    atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))
 		goto unlock;
 
 	/* Set the waiting flag to notify readers that a writer is pending */
-	atomic_add(_QW_WAITING, &lock->cnts);
+	atomic_or(_QW_WAITING, &lock->cnts);
 
 	/* When no more readers or writers, set the locked flag */
 	do {
kernel/smp.c

Lines changed: 13 additions & 13 deletions

@@ -211,7 +211,7 @@ static u64 cfd_seq_inc(unsigned int src, unsigned int dst, unsigned int type)
 	} while (0)
 
 /* Record current CSD work for current CPU, NULL to erase. */
-static void __csd_lock_record(call_single_data_t *csd)
+static void __csd_lock_record(struct __call_single_data *csd)
 {
 	if (!csd) {
 		smp_mb(); /* NULL cur_csd after unlock. */
@@ -226,13 +226,13 @@ static void __csd_lock_record(call_single_data_t *csd)
 	/* Or before unlock, as the case may be. */
 }
 
-static __always_inline void csd_lock_record(call_single_data_t *csd)
+static __always_inline void csd_lock_record(struct __call_single_data *csd)
 {
 	if (static_branch_unlikely(&csdlock_debug_enabled))
 		__csd_lock_record(csd);
 }
 
-static int csd_lock_wait_getcpu(call_single_data_t *csd)
+static int csd_lock_wait_getcpu(struct __call_single_data *csd)
 {
 	unsigned int csd_type;
 
@@ -282,7 +282,7 @@ static const char *csd_lock_get_type(unsigned int type)
 	return (type >= ARRAY_SIZE(seq_type)) ? "?" : seq_type[type];
 }
 
-static void csd_lock_print_extended(call_single_data_t *csd, int cpu)
+static void csd_lock_print_extended(struct __call_single_data *csd, int cpu)
 {
 	struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);
 	unsigned int srccpu = csd->node.src;
@@ -321,7 +321,7 @@ static void csd_lock_print_extended(call_single_data_t *csd, int cpu)
  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
  * so waiting on other types gets much less information.
 */
-static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
+static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
 {
 	int cpu = -1;
 	int cpux;
@@ -387,7 +387,7 @@ static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
 * previous function call. For multi-cpu calls its even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
-static void __csd_lock_wait(call_single_data_t *csd)
+static void __csd_lock_wait(struct __call_single_data *csd)
 {
 	int bug_id = 0;
 	u64 ts0, ts1;
@@ -401,7 +401,7 @@ static void __csd_lock_wait(call_single_data_t *csd)
 	smp_acquire__after_ctrl_dep();
 }
 
-static __always_inline void csd_lock_wait(call_single_data_t *csd)
+static __always_inline void csd_lock_wait(struct __call_single_data *csd)
 {
 	if (static_branch_unlikely(&csdlock_debug_enabled)) {
 		__csd_lock_wait(csd);
@@ -431,17 +431,17 @@ static void __smp_call_single_queue_debug(int cpu, struct llist_node *node)
 #else
 #define cfd_seq_store(var, src, dst, type)
 
-static void csd_lock_record(call_single_data_t *csd)
+static void csd_lock_record(struct __call_single_data *csd)
 {
 }
 
-static __always_inline void csd_lock_wait(call_single_data_t *csd)
+static __always_inline void csd_lock_wait(struct __call_single_data *csd)
 {
 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 }
 #endif
 
-static __always_inline void csd_lock(call_single_data_t *csd)
+static __always_inline void csd_lock(struct __call_single_data *csd)
 {
 	csd_lock_wait(csd);
 	csd->node.u_flags |= CSD_FLAG_LOCK;
@@ -454,7 +454,7 @@ static __always_inline void csd_lock(call_single_data_t *csd)
 	smp_wmb();
 }
 
-static __always_inline void csd_unlock(call_single_data_t *csd)
+static __always_inline void csd_unlock(struct __call_single_data *csd)
 {
 	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
 
@@ -501,7 +501,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
-static int generic_exec_single(int cpu, call_single_data_t *csd)
+static int generic_exec_single(int cpu, struct __call_single_data *csd)
 {
 	if (cpu == smp_processor_id()) {
 		smp_call_func_t func = csd->func;
@@ -784,7 +784,7 @@ EXPORT_SYMBOL(smp_call_function_single);
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
+int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
 {
 	int err = 0;
 
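The csd passed to these functions is "locked" with CSD_FLAG_LOCK while a call is in flight and released by the destination CPU after the callback runs; a caller of smp_call_function_single_async() must not reuse the csd until then. A rough userspace analogy of that handshake — threads standing in for CPUs, mock names throughout, not kernel code; compile with -pthread:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define CSD_FLAG_LOCK 0x01u	/* same bit-flag idea as the kernel's */

struct mock_csd {
	atomic_uint flags;
	void (*func)(void *);
	void *info;
};

static void mock_csd_lock(struct mock_csd *csd)
{
	/* Wait for any previous async use of this csd to finish; the
	 * kernel uses smp_cond_load_acquire() for this spin. Only one
	 * owner thread locks a given csd, mirroring the kernel's
	 * usage contract. */
	while (atomic_load_explicit(&csd->flags, memory_order_acquire)
	       & CSD_FLAG_LOCK)
		;
	atomic_fetch_or_explicit(&csd->flags, CSD_FLAG_LOCK,
				 memory_order_relaxed);
}

static void mock_csd_unlock(struct mock_csd *csd)
{
	/* Release pairs with the acquire in mock_csd_lock(). */
	atomic_fetch_and_explicit(&csd->flags, ~CSD_FLAG_LOCK,
				  memory_order_release);
}

static void hello(void *info) { printf("ran: %s\n", (const char *)info); }

static void *remote_cpu(void *arg)
{
	struct mock_csd *csd = arg;

	csd->func(csd->info);	/* the "IPI handler" runs the callback... */
	mock_csd_unlock(csd);	/* ...then releases the csd for reuse */
	return NULL;
}

int main(void)
{
	static struct mock_csd csd = { .func = hello, .info = "async call" };
	pthread_t t;

	mock_csd_lock(&csd);	/* owner marks the csd as in flight */
	pthread_create(&t, NULL, remote_cpu, &csd);
	pthread_join(t, NULL);

	mock_csd_lock(&csd);	/* safe to reuse only after the unlock */
	mock_csd_unlock(&csd);
	return 0;
}

The -EBUSY path in smp_call_function_single_async() is exactly this "flag still set" test: an async csd may only be resubmitted once the destination CPU has unlocked it.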
kernel/up.c

Lines changed: 1 addition & 1 deletion

@@ -25,7 +25,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
+int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
 {
 	unsigned long flags;
 
