Skip to content

Commit 5ba33b4

Browse files
committed
Merge tag 'locking-urgent-2021-03-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking fixes from Ingo Molnar:

 - Get static calls & modules right. Hopefully.

 - WW mutex fixes

* tag 'locking-urgent-2021-03-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  static_call: Fix static_call_update() sanity check
  static_call: Align static_call_is_init() patching condition
  static_call: Fix static_call_set_init()
  locking/ww_mutex: Fix acquire/release imbalance in ww_acquire_init()/ww_acquire_fini()
  locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
2 parents 92ed88c + 38c9358 commit 5ba33b4

File tree

4 files changed

+49
-31
lines changed

4 files changed

+49
-31
lines changed

include/linux/ww_mutex.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -173,9 +173,10 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
173173
*/
174174
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
175175
{
176-
#ifdef CONFIG_DEBUG_MUTEXES
176+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
177177
mutex_release(&ctx->dep_map, _THIS_IP_);
178-
178+
#endif
179+
#ifdef CONFIG_DEBUG_MUTEXES
179180
DEBUG_LOCKS_WARN_ON(ctx->acquired);
180181
if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
181182
/*

kernel/jump_label.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -407,6 +407,14 @@ static bool jump_label_can_update(struct jump_entry *entry, bool init)
407407
return false;
408408

409409
if (!kernel_text_address(jump_entry_code(entry))) {
410+
/*
411+
* This skips patching built-in __exit, which
412+
* is part of init_section_contains() but is
413+
* not part of kernel_text_address().
414+
*
415+
* Skipping built-in __exit is fine since it
416+
* will never be executed.
417+
*/
410418
WARN_ONCE(!jump_entry_is_init(entry),
411419
"can't patch jump_label at %pS",
412420
(void *)jump_entry_code(entry));

kernel/locking/mutex.c

Lines changed: 14 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -626,7 +626,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
626626
*/
627627
static __always_inline bool
628628
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
629-
const bool use_ww_ctx, struct mutex_waiter *waiter)
629+
struct mutex_waiter *waiter)
630630
{
631631
if (!waiter) {
632632
/*
@@ -702,7 +702,7 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
702702
#else
703703
static __always_inline bool
704704
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
705-
const bool use_ww_ctx, struct mutex_waiter *waiter)
705+
struct mutex_waiter *waiter)
706706
{
707707
return false;
708708
}
@@ -922,14 +922,17 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
922922
struct ww_mutex *ww;
923923
int ret;
924924

925+
if (!use_ww_ctx)
926+
ww_ctx = NULL;
927+
925928
might_sleep();
926929

927930
#ifdef CONFIG_DEBUG_MUTEXES
928931
DEBUG_LOCKS_WARN_ON(lock->magic != lock);
929932
#endif
930933

931934
ww = container_of(lock, struct ww_mutex, base);
932-
if (use_ww_ctx && ww_ctx) {
935+
if (ww_ctx) {
933936
if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
934937
return -EALREADY;
935938

@@ -946,10 +949,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
946949
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
947950

948951
if (__mutex_trylock(lock) ||
949-
mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
952+
mutex_optimistic_spin(lock, ww_ctx, NULL)) {
950953
/* got the lock, yay! */
951954
lock_acquired(&lock->dep_map, ip);
952-
if (use_ww_ctx && ww_ctx)
955+
if (ww_ctx)
953956
ww_mutex_set_context_fastpath(ww, ww_ctx);
954957
preempt_enable();
955958
return 0;
@@ -960,7 +963,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
960963
* After waiting to acquire the wait_lock, try again.
961964
*/
962965
if (__mutex_trylock(lock)) {
963-
if (use_ww_ctx && ww_ctx)
966+
if (ww_ctx)
964967
__ww_mutex_check_waiters(lock, ww_ctx);
965968

966969
goto skip_wait;
@@ -1013,7 +1016,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
10131016
goto err;
10141017
}
10151018

1016-
if (use_ww_ctx && ww_ctx) {
1019+
if (ww_ctx) {
10171020
ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
10181021
if (ret)
10191022
goto err;
@@ -1026,7 +1029,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
10261029
* ww_mutex needs to always recheck its position since its waiter
10271030
* list is not FIFO ordered.
10281031
*/
1029-
if ((use_ww_ctx && ww_ctx) || !first) {
1032+
if (ww_ctx || !first) {
10301033
first = __mutex_waiter_is_first(lock, &waiter);
10311034
if (first)
10321035
__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
@@ -1039,7 +1042,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
10391042
* or we must see its unlock and acquire.
10401043
*/
10411044
if (__mutex_trylock(lock) ||
1042-
(first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
1045+
(first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
10431046
break;
10441047

10451048
spin_lock(&lock->wait_lock);
@@ -1048,7 +1051,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
10481051
acquired:
10491052
__set_current_state(TASK_RUNNING);
10501053

1051-
if (use_ww_ctx && ww_ctx) {
1054+
if (ww_ctx) {
10521055
/*
10531056
* Wound-Wait; we stole the lock (!first_waiter), check the
10541057
* waiters as anyone might want to wound us.
@@ -1068,7 +1071,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
10681071
/* got the lock - cleanup and rejoice! */
10691072
lock_acquired(&lock->dep_map, ip);
10701073

1071-
if (use_ww_ctx && ww_ctx)
1074+
if (ww_ctx)
10721075
ww_mutex_lock_acquired(ww, ww_ctx);
10731076

10741077
spin_unlock(&lock->wait_lock);

kernel/static_call.c

Lines changed: 24 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -35,27 +35,30 @@ static inline void *static_call_addr(struct static_call_site *site)
3535
return (void *)((long)site->addr + (long)&site->addr);
3636
}
3737

38+
static inline unsigned long __static_call_key(const struct static_call_site *site)
39+
{
40+
return (long)site->key + (long)&site->key;
41+
}
3842

3943
static inline struct static_call_key *static_call_key(const struct static_call_site *site)
4044
{
41-
return (struct static_call_key *)
42-
(((long)site->key + (long)&site->key) & ~STATIC_CALL_SITE_FLAGS);
45+
return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
4346
}
4447

4548
/* These assume the key is word-aligned. */
4649
static inline bool static_call_is_init(struct static_call_site *site)
4750
{
48-
return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_INIT;
51+
return __static_call_key(site) & STATIC_CALL_SITE_INIT;
4952
}
5053

5154
static inline bool static_call_is_tail(struct static_call_site *site)
5255
{
53-
return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_TAIL;
56+
return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
5457
}
5558

5659
static inline void static_call_set_init(struct static_call_site *site)
5760
{
58-
site->key = ((long)static_call_key(site) | STATIC_CALL_SITE_INIT) -
61+
site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
5962
(long)&site->key;
6063
}
6164

@@ -146,6 +149,7 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
146149
};
147150

148151
for (site_mod = &first; site_mod; site_mod = site_mod->next) {
152+
bool init = system_state < SYSTEM_RUNNING;
149153
struct module *mod = site_mod->mod;
150154

151155
if (!site_mod->sites) {
@@ -165,32 +169,34 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
165169
if (mod) {
166170
stop = mod->static_call_sites +
167171
mod->num_static_call_sites;
172+
init = mod->state == MODULE_STATE_COMING;
168173
}
169174
#endif
170175

171176
for (site = site_mod->sites;
172177
site < stop && static_call_key(site) == key; site++) {
173178
void *site_addr = static_call_addr(site);
174179

175-
if (static_call_is_init(site)) {
176-
/*
177-
* Don't write to call sites which were in
178-
* initmem and have since been freed.
179-
*/
180-
if (!mod && system_state >= SYSTEM_RUNNING)
181-
continue;
182-
if (mod && !within_module_init((unsigned long)site_addr, mod))
183-
continue;
184-
}
180+
if (!init && static_call_is_init(site))
181+
continue;
185182

186183
if (!kernel_text_address((unsigned long)site_addr)) {
187-
WARN_ONCE(1, "can't patch static call site at %pS",
184+
/*
185+
* This skips patching built-in __exit, which
186+
* is part of init_section_contains() but is
187+
* not part of kernel_text_address().
188+
*
189+
* Skipping built-in __exit is fine since it
190+
* will never be executed.
191+
*/
192+
WARN_ONCE(!static_call_is_init(site),
193+
"can't patch static call site at %pS",
188194
site_addr);
189195
continue;
190196
}
191197

192198
arch_static_call_transform(site_addr, NULL, func,
193-
static_call_is_tail(site));
199+
static_call_is_tail(site));
194200
}
195201
}
196202

@@ -349,7 +355,7 @@ static int static_call_add_module(struct module *mod)
349355
struct static_call_site *site;
350356

351357
for (site = start; site != stop; site++) {
352-
unsigned long s_key = (long)site->key + (long)&site->key;
358+
unsigned long s_key = __static_call_key(site);
353359
unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
354360
unsigned long key;
355361

0 commit comments

Comments
 (0)