Skip to content

Commit f98a3dc

Browse files
arndb authored and Peter Zijlstra committed
locking: Remove spin_lock_flags() etc
parisc, ia64 and powerpc32 are the only remaining architectures that provide custom arch_{spin,read,write}_lock_flags() functions, which are meant to re-enable interrupts while waiting for a spinlock.

However, none of these can actually run into this codepath, because it is only called on architectures without CONFIG_GENERIC_LOCKBREAK, or when CONFIG_DEBUG_LOCK_ALLOC is set without CONFIG_LOCKDEP, and none of those combinations are possible on the three architectures.

Going back in the git history, it appears that arch/mn10300 may have been able to run into this code path, but there is a good chance that it never worked. On the architectures that still exist, it was already impossible to hit back in 2008 after the introduction of CONFIG_GENERIC_LOCKBREAK, and possibly earlier.

As this is all dead code, just remove it and the helper functions built around it. For arch/ia64, the inline asm could be cleaned up, but it seems safer to leave it untouched.

Signed-off-by: Arnd Bergmann <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Helge Deller <[email protected]> # parisc
Link: https://lore.kernel.org/r/[email protected]
1 parent 5197fcd commit f98a3dc

File tree

12 files changed

+9
-125
lines changed

12 files changed

+9
-125
lines changed

arch/ia64/include/asm/spinlock.h

Lines changed: 6 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -124,18 +124,13 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
124124
__ticket_spin_unlock(lock);
125125
}
126126

127-
static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
128-
unsigned long flags)
129-
{
130-
arch_spin_lock(lock);
131-
}
132-
#define arch_spin_lock_flags arch_spin_lock_flags
133-
134127
#ifdef ASM_SUPPORTED
135128

136129
static __always_inline void
137-
arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
130+
arch_read_lock(arch_rwlock_t *lock)
138131
{
132+
unsigned long flags = 0;
133+
139134
__asm__ __volatile__ (
140135
"tbit.nz p6, p0 = %1,%2\n"
141136
"br.few 3f\n"
@@ -157,13 +152,8 @@ arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
157152
: "p6", "p7", "r2", "memory");
158153
}
159154

160-
#define arch_read_lock_flags arch_read_lock_flags
161-
#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
162-
163155
#else /* !ASM_SUPPORTED */
164156

165-
#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
166-
167157
#define arch_read_lock(rw) \
168158
do { \
169159
arch_rwlock_t *__read_lock_ptr = (rw); \
@@ -186,8 +176,10 @@ do { \
186176
#ifdef ASM_SUPPORTED
187177

188178
static __always_inline void
189-
arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
179+
arch_write_lock(arch_rwlock_t *lock)
190180
{
181+
unsigned long flags = 0;
182+
191183
__asm__ __volatile__ (
192184
"tbit.nz p6, p0 = %1, %2\n"
193185
"mov ar.ccv = r0\n"
@@ -210,9 +202,6 @@ arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
210202
: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
211203
}
212204

213-
#define arch_write_lock_flags arch_write_lock_flags
214-
#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
215-
216205
#define arch_write_trylock(rw) \
217206
({ \
218207
register long result; \

arch/openrisc/include/asm/spinlock.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,6 @@
1919

2020
#include <asm/qrwlock.h>
2121

22-
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
23-
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
24-
2522
#define arch_spin_relax(lock) cpu_relax()
2623
#define arch_read_relax(lock) cpu_relax()
2724
#define arch_write_relax(lock) cpu_relax()

arch/parisc/include/asm/spinlock.h

Lines changed: 0 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -23,21 +23,6 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
2323
continue;
2424
}
2525

26-
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
27-
unsigned long flags)
28-
{
29-
volatile unsigned int *a;
30-
31-
a = __ldcw_align(x);
32-
while (__ldcw(a) == 0)
33-
while (*a == 0)
34-
if (flags & PSW_SM_I) {
35-
local_irq_enable();
36-
local_irq_disable();
37-
}
38-
}
39-
#define arch_spin_lock_flags arch_spin_lock_flags
40-
4126
static inline void arch_spin_unlock(arch_spinlock_t *x)
4227
{
4328
volatile unsigned int *a;

arch/powerpc/include/asm/simple_spinlock.h

Lines changed: 0 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -123,27 +123,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
123123
}
124124
}
125125

126-
static inline
127-
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
128-
{
129-
unsigned long flags_dis;
130-
131-
while (1) {
132-
if (likely(__arch_spin_trylock(lock) == 0))
133-
break;
134-
local_save_flags(flags_dis);
135-
local_irq_restore(flags);
136-
do {
137-
HMT_low();
138-
if (is_shared_processor())
139-
splpar_spin_yield(lock);
140-
} while (unlikely(lock->slock != 0));
141-
HMT_medium();
142-
local_irq_restore(flags_dis);
143-
}
144-
}
145-
#define arch_spin_lock_flags arch_spin_lock_flags
146-
147126
static inline void arch_spin_unlock(arch_spinlock_t *lock)
148127
{
149128
__asm__ __volatile__("# arch_spin_unlock\n\t"

arch/s390/include/asm/spinlock.h

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -67,14 +67,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lp)
6767
arch_spin_lock_wait(lp);
6868
}
6969

70-
static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
71-
unsigned long flags)
72-
{
73-
if (!arch_spin_trylock_once(lp))
74-
arch_spin_lock_wait(lp);
75-
}
76-
#define arch_spin_lock_flags arch_spin_lock_flags
77-
7870
static inline int arch_spin_trylock(arch_spinlock_t *lp)
7971
{
8072
if (!arch_spin_trylock_once(lp))

include/linux/lockdep.h

Lines changed: 0 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -481,23 +481,6 @@ do { \
481481

482482
#endif /* CONFIG_LOCK_STAT */
483483

484-
#ifdef CONFIG_LOCKDEP
485-
486-
/*
487-
* On lockdep we dont want the hand-coded irq-enable of
488-
* _raw_*_lock_flags() code, because lockdep assumes
489-
* that interrupts are not re-enabled during lock-acquire:
490-
*/
491-
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
492-
LOCK_CONTENDED((_lock), (try), (lock))
493-
494-
#else /* CONFIG_LOCKDEP */
495-
496-
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
497-
lockfl((_lock), (flags))
498-
499-
#endif /* CONFIG_LOCKDEP */
500-
501484
#ifdef CONFIG_PROVE_LOCKING
502485
extern void print_irqtrace_events(struct task_struct *curr);
503486
#else

include/linux/rwlock.h

Lines changed: 0 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -30,31 +30,16 @@ do { \
3030

3131
#ifdef CONFIG_DEBUG_SPINLOCK
3232
extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
33-
#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
3433
extern int do_raw_read_trylock(rwlock_t *lock);
3534
extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
3635
extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
37-
#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
3836
extern int do_raw_write_trylock(rwlock_t *lock);
3937
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
4038
#else
41-
42-
#ifndef arch_read_lock_flags
43-
# define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
44-
#endif
45-
46-
#ifndef arch_write_lock_flags
47-
# define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
48-
#endif
49-
5039
# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
51-
# define do_raw_read_lock_flags(lock, flags) \
52-
do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
5340
# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
5441
# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
5542
# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
56-
# define do_raw_write_lock_flags(lock, flags) \
57-
do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
5843
# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
5944
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
6045
#endif

include/linux/rwlock_api_smp.h

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -157,8 +157,7 @@ static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
157157
local_irq_save(flags);
158158
preempt_disable();
159159
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
160-
LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
161-
do_raw_read_lock_flags, &flags);
160+
LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
162161
return flags;
163162
}
164163

@@ -184,8 +183,7 @@ static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
184183
local_irq_save(flags);
185184
preempt_disable();
186185
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
187-
LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
188-
do_raw_write_lock_flags, &flags);
186+
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
189187
return flags;
190188
}
191189

include/linux/spinlock.h

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -177,7 +177,6 @@ do { \
177177

178178
#ifdef CONFIG_DEBUG_SPINLOCK
179179
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
180-
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
181180
extern int do_raw_spin_trylock(raw_spinlock_t *lock);
182181
extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
183182
#else
@@ -188,18 +187,6 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
188187
mmiowb_spin_lock();
189188
}
190189

191-
#ifndef arch_spin_lock_flags
192-
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
193-
#endif
194-
195-
static inline void
196-
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
197-
{
198-
__acquire(lock);
199-
arch_spin_lock_flags(&lock->raw_lock, *flags);
200-
mmiowb_spin_lock();
201-
}
202-
203190
static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
204191
{
205192
int ret = arch_spin_trylock(&(lock)->raw_lock);

include/linux/spinlock_api_smp.h

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -108,16 +108,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
108108
local_irq_save(flags);
109109
preempt_disable();
110110
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
111-
/*
112-
* On lockdep we dont want the hand-coded irq-enable of
113-
* do_raw_spin_lock_flags() code, because lockdep assumes
114-
* that interrupts are not re-enabled during lock-acquire:
115-
*/
116-
#ifdef CONFIG_LOCKDEP
117111
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
118-
#else
119-
do_raw_spin_lock_flags(lock, &flags);
120-
#endif
121112
return flags;
122113
}
123114

0 commit comments

Comments (0)