Skip to content

Commit fbdc8f0

Browse files
committed
parisc: Rework arch_rw locking functions
Clean up the arch read/write locking functions based on the arc implementation. This improves readability of those functions. Signed-off-by: Helge Deller <[email protected]>
1 parent 2772f0e commit fbdc8f0

File tree

2 files changed

+67
-82
lines changed

2 files changed

+67
-82
lines changed

arch/parisc/include/asm/spinlock.h

Lines changed: 56 additions & 79 deletions
Original file line numberDiff line numberDiff line change
@@ -67,116 +67,93 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
6767

6868
/*
6969
* Read-write spinlocks, allowing multiple readers but only one writer.
70-
* Linux rwlocks are unfair to writers; they can be starved for an indefinite
71-
* time by readers. With care, they can also be taken in interrupt context.
70+
* Unfair locking as Writers could be starved indefinitely by Reader(s)
7271
*
73-
* In the PA-RISC implementation, we have a spinlock and a counter.
74-
* Readers use the lock to serialise their access to the counter (which
75-
* records how many readers currently hold the lock).
76-
* Writers hold the spinlock, preventing any readers or other writers from
77-
* grabbing the rwlock.
72+
* The spinlock itself is contained in @counter and access to it is
73+
* serialized with @lock_mutex.
7874
*/
7975

80-
/* Note that we have to ensure interrupts are disabled in case we're
81-
* interrupted by some other code that wants to grab the same read lock */
82-
static __inline__ void arch_read_lock(arch_rwlock_t *rw)
76+
/* 1 - lock taken successfully */
77+
static inline int arch_read_trylock(arch_rwlock_t *rw)
8378
{
79+
int ret = 0;
8480
unsigned long flags;
85-
local_irq_save(flags);
86-
arch_spin_lock_flags(&rw->lock, flags);
87-
rw->counter++;
88-
arch_spin_unlock(&rw->lock);
89-
local_irq_restore(flags);
90-
}
9181

92-
/* Note that we have to ensure interrupts are disabled in case we're
93-
* interrupted by some other code that wants to grab the same read lock */
94-
static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
95-
{
96-
unsigned long flags;
9782
local_irq_save(flags);
98-
arch_spin_lock_flags(&rw->lock, flags);
99-
rw->counter--;
100-
arch_spin_unlock(&rw->lock);
83+
arch_spin_lock(&(rw->lock_mutex));
84+
85+
/*
86+
* zero means writer holds the lock exclusively, deny Reader.
87+
* Otherwise grant lock to first/subseq reader
88+
*/
89+
if (rw->counter > 0) {
90+
rw->counter--;
91+
ret = 1;
92+
}
93+
94+
arch_spin_unlock(&(rw->lock_mutex));
10195
local_irq_restore(flags);
96+
97+
return ret;
10298
}
10399

104-
/* Note that we have to ensure interrupts are disabled in case we're
105-
* interrupted by some other code that wants to grab the same read lock */
106-
static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
100+
/* 1 - lock taken successfully */
101+
static inline int arch_write_trylock(arch_rwlock_t *rw)
107102
{
103+
int ret = 0;
108104
unsigned long flags;
109-
retry:
105+
110106
local_irq_save(flags);
111-
if (arch_spin_trylock(&rw->lock)) {
112-
rw->counter++;
113-
arch_spin_unlock(&rw->lock);
114-
local_irq_restore(flags);
115-
return 1;
107+
arch_spin_lock(&(rw->lock_mutex));
108+
109+
/*
110+
* If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
111+
* deny writer. Otherwise if unlocked grant to writer
112+
* Hence the claim that Linux rwlocks are unfair to writers.
113+
* (can be starved for an indefinite time by readers).
114+
*/
115+
if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
116+
rw->counter = 0;
117+
ret = 1;
116118
}
117-
119+
arch_spin_unlock(&(rw->lock_mutex));
118120
local_irq_restore(flags);
119-
/* If write-locked, we fail to acquire the lock */
120-
if (rw->counter < 0)
121-
return 0;
122121

123-
/* Wait until we have a realistic chance at the lock */
124-
while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
122+
return ret;
123+
}
124+
125+
static inline void arch_read_lock(arch_rwlock_t *rw)
126+
{
127+
while (!arch_read_trylock(rw))
125128
cpu_relax();
129+
}
126130

127-
goto retry;
131+
static inline void arch_write_lock(arch_rwlock_t *rw)
132+
{
133+
while (!arch_write_trylock(rw))
134+
cpu_relax();
128135
}
129136

130-
/* Note that we have to ensure interrupts are disabled in case we're
131-
* interrupted by some other code that wants to read_trylock() this lock */
132-
static __inline__ void arch_write_lock(arch_rwlock_t *rw)
137+
static inline void arch_read_unlock(arch_rwlock_t *rw)
133138
{
134139
unsigned long flags;
135-
retry:
136-
local_irq_save(flags);
137-
arch_spin_lock_flags(&rw->lock, flags);
138-
139-
if (rw->counter != 0) {
140-
arch_spin_unlock(&rw->lock);
141-
local_irq_restore(flags);
142-
143-
while (rw->counter != 0)
144-
cpu_relax();
145-
146-
goto retry;
147-
}
148140

149-
rw->counter = -1; /* mark as write-locked */
150-
mb();
141+
local_irq_save(flags);
142+
arch_spin_lock(&(rw->lock_mutex));
143+
rw->counter++;
144+
arch_spin_unlock(&(rw->lock_mutex));
151145
local_irq_restore(flags);
152146
}
153147

154-
static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
155-
{
156-
rw->counter = 0;
157-
arch_spin_unlock(&rw->lock);
158-
}
159-
160-
/* Note that we have to ensure interrupts are disabled in case we're
161-
* interrupted by some other code that wants to read_trylock() this lock */
162-
static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
148+
static inline void arch_write_unlock(arch_rwlock_t *rw)
163149
{
164150
unsigned long flags;
165-
int result = 0;
166151

167152
local_irq_save(flags);
168-
if (arch_spin_trylock(&rw->lock)) {
169-
if (rw->counter == 0) {
170-
rw->counter = -1;
171-
result = 1;
172-
} else {
173-
/* Read-locked. Oh well. */
174-
arch_spin_unlock(&rw->lock);
175-
}
176-
}
153+
arch_spin_lock(&(rw->lock_mutex));
154+
rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
155+
arch_spin_unlock(&(rw->lock_mutex));
177156
local_irq_restore(flags);
178-
179-
return result;
180157
}
181158

182159
#endif /* __ASM_SPINLOCK_H */

arch/parisc/include/asm/spinlock_types.h

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,11 +12,19 @@ typedef struct {
1212
#endif
1313
} arch_spinlock_t;
1414

15+
16+
/* counter:
17+
* Unlocked : 0x0100_0000
18+
* Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
19+
* Write lock : 0x0, but only if prior value is "unlocked" 0x0100_0000
20+
*/
1521
typedef struct {
16-
arch_spinlock_t lock;
17-
volatile int counter;
22+
arch_spinlock_t lock_mutex;
23+
volatile unsigned int counter;
1824
} arch_rwlock_t;
1925

20-
#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
26+
#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
27+
#define __ARCH_RW_LOCK_UNLOCKED { .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \
28+
.counter = __ARCH_RW_LOCK_UNLOCKED__ }
2129

2230
#endif

0 commit comments

Comments
 (0)