@@ -67,116 +67,93 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
- * Linux rwlocks are unfair to writers; they can be starved for an indefinite
- * time by readers. With care, they can also be taken in interrupt context.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
  *
- * In the PA-RISC implementation, we have a spinlock and a counter.
- * Readers use the lock to serialise their access to the counter (which
- * records how many readers currently hold the lock).
- * Writers hold the spinlock, preventing any readers or other writers from
- * grabbing the rwlock.
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
  */
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void arch_read_lock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
+        int ret = 0;
         unsigned long flags;
-        local_irq_save(flags);
-        arch_spin_lock_flags(&rw->lock, flags);
-        rw->counter++;
-        arch_spin_unlock(&rw->lock);
-        local_irq_restore(flags);
-}
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
-{
-        unsigned long flags;
         local_irq_save(flags);
-        arch_spin_lock_flags(&rw->lock, flags);
-        rw->counter--;
-        arch_spin_unlock(&rw->lock);
+        arch_spin_lock(&(rw->lock_mutex));
+
+        /*
+         * zero means writer holds the lock exclusively, deny Reader.
+         * Otherwise grant lock to first/subseq reader
+         */
+        if (rw->counter > 0) {
+                rw->counter--;
+                ret = 1;
+        }
+
+        arch_spin_unlock(&(rw->lock_mutex));
         local_irq_restore(flags);
+
+        return ret;
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
+        int ret = 0;
         unsigned long flags;
- retry:
+
         local_irq_save(flags);
-        if (arch_spin_trylock(&rw->lock)) {
-                rw->counter++;
-                arch_spin_unlock(&rw->lock);
-                local_irq_restore(flags);
-                return 1;
+        arch_spin_lock(&(rw->lock_mutex));
+
+        /*
+         * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+         * deny writer. Otherwise if unlocked grant to writer
+         * Hence the claim that Linux rwlocks are unfair to writers.
+         * (can be starved for an indefinite time by readers).
+         */
+        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+                rw->counter = 0;
+                ret = 1;
         }
-
+        arch_spin_unlock(&(rw->lock_mutex));
         local_irq_restore(flags);
-        /* If write-locked, we fail to acquire the lock */
-        if (rw->counter < 0)
-                return 0;
 
-        /* Wait until we have a realistic chance at the lock */
-        while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
+        return ret;
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+        while (!arch_read_trylock(rw))
                 cpu_relax();
+}
 
-        goto retry;
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+        while (!arch_write_trylock(rw))
+                cpu_relax();
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void arch_write_lock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
         unsigned long flags;
- retry:
-        local_irq_save(flags);
-        arch_spin_lock_flags(&rw->lock, flags);
-
-        if (rw->counter != 0) {
-                arch_spin_unlock(&rw->lock);
-                local_irq_restore(flags);
-
-                while (rw->counter != 0)
-                        cpu_relax();
-
-                goto retry;
-        }
 
-        rw->counter = -1; /* mark as write-locked */
-        mb();
+        local_irq_save(flags);
+        arch_spin_lock(&(rw->lock_mutex));
+        rw->counter++;
+        arch_spin_unlock(&(rw->lock_mutex));
         local_irq_restore(flags);
 }
 
-static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
-{
-        rw->counter = 0;
-        arch_spin_unlock(&rw->lock);
-}
-
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
         unsigned long flags;
-        int result = 0;
 
         local_irq_save(flags);
-        if (arch_spin_trylock(&rw->lock)) {
-                if (rw->counter == 0) {
-                        rw->counter = -1;
-                        result = 1;
-                } else {
-                        /* Read-locked. Oh well. */
-                        arch_spin_unlock(&rw->lock);
-                }
-        }
+        arch_spin_lock(&(rw->lock_mutex));
+        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+        arch_spin_unlock(&(rw->lock_mutex));
         local_irq_restore(flags);
-
-        return result;
 }
 
 #endif /* __ASM_SPINLOCK_H */
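The new scheme keeps a reader budget in rw->counter, initialised to __ARCH_RW_LOCK_UNLOCKED__, and serialises every update with the per-lock spinlock rw->lock_mutex: each reader that gets in decrements the counter, and a writer may only take the lock while the counter still holds its initial value, at which point it drops the counter to zero. Below is a minimal userspace sketch of that counter-plus-mutex idea, assuming POSIX threads. The names rwlock_t, RW_UNLOCKED, read_trylock() and friends are invented for the illustration; pthread_mutex_t stands in for the arch spinlock, and the kernel's local_irq_save()/local_irq_restore() pair, needed there because the locks can be taken on interrupt-related paths, has no userspace counterpart and is omitted. This is not the kernel code, only a model of its logic.

/* Hypothetical userspace model of the counter-based rwlock above. */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define RW_UNLOCKED 0x01000000          /* stand-in for __ARCH_RW_LOCK_UNLOCKED__ */

typedef struct {
        pthread_mutex_t lock_mutex;     /* serialises access to counter */
        int counter;                    /* RW_UNLOCKED = free, 0 = write-locked,
                                           anything in between = readers active */
} rwlock_t;

#define RWLOCK_INIT { PTHREAD_MUTEX_INITIALIZER, RW_UNLOCKED }

/* 1 - lock taken successfully */
static int read_trylock(rwlock_t *rw)
{
        int ret = 0;

        pthread_mutex_lock(&rw->lock_mutex);
        if (rw->counter > 0) {          /* zero means a writer holds it */
                rw->counter--;
                ret = 1;
        }
        pthread_mutex_unlock(&rw->lock_mutex);
        return ret;
}

/* 1 - lock taken successfully */
static int write_trylock(rwlock_t *rw)
{
        int ret = 0;

        pthread_mutex_lock(&rw->lock_mutex);
        if (rw->counter == RW_UNLOCKED) {   /* no readers, no writer */
                rw->counter = 0;
                ret = 1;
        }
        pthread_mutex_unlock(&rw->lock_mutex);
        return ret;
}

static void read_lock(rwlock_t *rw)
{
        while (!read_trylock(rw))
                sched_yield();          /* userspace stand-in for cpu_relax() */
}

static void write_lock(rwlock_t *rw)
{
        while (!write_trylock(rw))
                sched_yield();
}

static void read_unlock(rwlock_t *rw)
{
        pthread_mutex_lock(&rw->lock_mutex);
        rw->counter++;                  /* one reader gone */
        pthread_mutex_unlock(&rw->lock_mutex);
}

static void write_unlock(rwlock_t *rw)
{
        pthread_mutex_lock(&rw->lock_mutex);
        rw->counter = RW_UNLOCKED;      /* hand the lock back entirely */
        pthread_mutex_unlock(&rw->lock_mutex);
}

int main(void)
{
        static rwlock_t rw = RWLOCK_INIT;

        read_lock(&rw);
        printf("read-locked, counter=%d\n", rw.counter);
        read_unlock(&rw);

        write_lock(&rw);
        printf("write-locked, counter=%d\n", rw.counter);
        write_unlock(&rw);
        return 0;
}

Because write_trylock() succeeds only when the counter has returned all the way to RW_UNLOCKED, a steady stream of readers can keep a writer spinning indefinitely, which is exactly the unfairness the new header comment calls out.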