@@ -15,6 +15,11 @@ typedef struct {
 #endif
 } local_lock_t;
 
+typedef struct {
+	local_lock_t	llock;
+	unsigned int	acquired;
+} localtry_lock_t;
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define LOCAL_LOCK_DEBUG_INIT(lockname)		\
 	.dep_map = {					\
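The new localtry_lock_t pairs the existing per-CPU lock with an `acquired` flag so that a trylock attempt can detect that the interrupted context on the same CPU already holds the lock. A minimal sketch of the invariant the flag provides, with a hypothetical helper name that is not part of the patch:

/*
 * Sketch only: 'acquired' is written exclusively by the CPU that owns
 * the lock, with preemption (or IRQs) already disabled, so an NMI on
 * the same CPU observes either 0 (free) or 1 (held by the interrupted
 * context); plain READ_ONCE()/WRITE_ONCE() therefore suffice.
 */
static bool sketch_trylock(localtry_lock_t *lt)
{
	if (READ_ONCE(lt->acquired))
		return false;		/* owner is on this CPU */
	WRITE_ONCE(lt->acquired, 1);
	return true;
}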
@@ -31,6 +36,13 @@ static inline void local_lock_acquire(local_lock_t *l)
 	l->owner = current;
 }
 
+static inline void local_trylock_acquire(local_lock_t *l)
+{
+	lock_map_acquire_try(&l->dep_map);
+	DEBUG_LOCKS_WARN_ON(l->owner);
+	l->owner = current;
+}
+
 static inline void local_lock_release(local_lock_t *l)
 {
 	DEBUG_LOCKS_WARN_ON(l->owner != current);
@@ -45,11 +57,13 @@ static inline void local_lock_debug_init(local_lock_t *l)
 #else /* CONFIG_DEBUG_LOCK_ALLOC */
 # define LOCAL_LOCK_DEBUG_INIT(lockname)
 static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_trylock_acquire(local_lock_t *l) { }
 static inline void local_lock_release(local_lock_t *l) { }
 static inline void local_lock_debug_init(local_lock_t *l) { }
 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
 
 #define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }
+#define INIT_LOCALTRY_LOCK(lockname)	{ .llock = { LOCAL_LOCK_DEBUG_INIT(lockname.llock) }}
 
 #define __local_lock_init(lock)					\
 do {								\
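With the initializer in place, a per-CPU instance can be declared statically. A hedged usage sketch, assuming the same DEFINE_PER_CPU pattern used with INIT_LOCAL_LOCK for the existing type; the struct and variable names are illustrative only:

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct my_pcpu_data {			/* hypothetical example struct */
	localtry_lock_t	lock;
	unsigned long	count;
};

static DEFINE_PER_CPU(struct my_pcpu_data, my_data) = {
	.lock = INIT_LOCALTRY_LOCK(lock),
};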
@@ -118,15 +132,115 @@ do { \
 #define __local_unlock_nested_bh(lock)				\
 	local_lock_release(this_cpu_ptr(lock))
 
+/* localtry_lock_t variants */
+
+#define __localtry_lock_init(lock)				\
+do {								\
+	__local_lock_init(&(lock)->llock);			\
+	WRITE_ONCE((lock)->acquired, 0);			\
+} while (0)
+
+#define __localtry_lock(lock)					\
+	do {							\
+		localtry_lock_t *lt;				\
+		preempt_disable();				\
+		lt = this_cpu_ptr(lock);			\
+		local_lock_acquire(&lt->llock);			\
+		WRITE_ONCE(lt->acquired, 1);			\
+	} while (0)
+
+#define __localtry_lock_irq(lock)				\
+	do {							\
+		localtry_lock_t *lt;				\
+		local_irq_disable();				\
+		lt = this_cpu_ptr(lock);			\
+		local_lock_acquire(&lt->llock);			\
+		WRITE_ONCE(lt->acquired, 1);			\
+	} while (0)
+
+#define __localtry_lock_irqsave(lock, flags)			\
+	do {							\
+		localtry_lock_t *lt;				\
+		local_irq_save(flags);				\
+		lt = this_cpu_ptr(lock);			\
+		local_lock_acquire(&lt->llock);			\
+		WRITE_ONCE(lt->acquired, 1);			\
+	} while (0)
+
+#define __localtry_trylock(lock)				\
+	({							\
+		localtry_lock_t *lt;				\
+		bool _ret;					\
+								\
+		preempt_disable();				\
+		lt = this_cpu_ptr(lock);			\
+		if (!READ_ONCE(lt->acquired)) {			\
+			WRITE_ONCE(lt->acquired, 1);		\
+			local_trylock_acquire(&lt->llock);	\
+			_ret = true;				\
+		} else {					\
+			_ret = false;				\
+			preempt_enable();			\
+		}						\
+		_ret;						\
+	})
+
+#define __localtry_trylock_irqsave(lock, flags)			\
+	({							\
+		localtry_lock_t *lt;				\
+		bool _ret;					\
+								\
+		local_irq_save(flags);				\
+		lt = this_cpu_ptr(lock);			\
+		if (!READ_ONCE(lt->acquired)) {			\
+			WRITE_ONCE(lt->acquired, 1);		\
+			local_trylock_acquire(&lt->llock);	\
+			_ret = true;				\
+		} else {					\
+			_ret = false;				\
+			local_irq_restore(flags);		\
+		}						\
+		_ret;						\
+	})
+
+#define __localtry_unlock(lock)					\
+	do {							\
+		localtry_lock_t *lt;				\
+		lt = this_cpu_ptr(lock);			\
+		WRITE_ONCE(lt->acquired, 0);			\
+		local_lock_release(&lt->llock);			\
+		preempt_enable();				\
+	} while (0)
+
+#define __localtry_unlock_irq(lock)				\
+	do {							\
+		localtry_lock_t *lt;				\
+		lt = this_cpu_ptr(lock);			\
+		WRITE_ONCE(lt->acquired, 0);			\
+		local_lock_release(&lt->llock);			\
+		local_irq_enable();				\
+	} while (0)
+
+#define __localtry_unlock_irqrestore(lock, flags)		\
+	do {							\
+		localtry_lock_t *lt;				\
+		lt = this_cpu_ptr(lock);			\
+		WRITE_ONCE(lt->acquired, 0);			\
+		local_lock_release(&lt->llock);			\
+		local_irq_restore(flags);			\
+	} while (0)
+
 #else /* !CONFIG_PREEMPT_RT */
 
 /*
  * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
  * critical section while staying preemptible.
  */
 typedef spinlock_t local_lock_t;
+typedef spinlock_t localtry_lock_t;
 
 #define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+#define INIT_LOCALTRY_LOCK(lockname) INIT_LOCAL_LOCK(lockname)
 
 #define __local_lock_init(l)					\
 do {								\
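Taken together, the !PREEMPT_RT variants give two entry points: the unconditional __localtry_lock*() forms for contexts that cannot race with a conflicting user, and the __localtry_trylock*() forms for NMI or hard-IRQ callers that must back off when the interrupted context on this CPU already holds the lock. A hedged caller sketch, assuming non-underscore wrappers (localtry_lock_irqsave() and friends) are exposed via linux/local_lock.h as they are for local_lock_t, and reusing the hypothetical my_data from above:

/* Process context: may take the lock unconditionally. */
static void update_count(unsigned long delta)
{
	unsigned long flags;

	localtry_lock_irqsave(&my_data.lock, flags);
	this_cpu_ptr(&my_data)->count += delta;
	localtry_unlock_irqrestore(&my_data.lock, flags);
}

/* NMI/hard-IRQ context: must use trylock and cope with failure. */
static bool try_update_count(unsigned long delta)
{
	unsigned long flags;

	if (!localtry_trylock_irqsave(&my_data.lock, flags))
		return false;	/* interrupted context holds the lock */
	this_cpu_ptr(&my_data)->count += delta;
	localtry_unlock_irqrestore(&my_data.lock, flags);
	return true;
}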
@@ -169,4 +283,36 @@ do { \
 	spin_unlock(this_cpu_ptr((lock)));			\
 } while (0)
 
+/* localtry_lock_t variants */
+
+#define __localtry_lock_init(lock)			__local_lock_init(lock)
+#define __localtry_lock(lock)				__local_lock(lock)
+#define __localtry_lock_irq(lock)			__local_lock(lock)
+#define __localtry_lock_irqsave(lock, flags)		__local_lock_irqsave(lock, flags)
+#define __localtry_unlock(lock)			__local_unlock(lock)
+#define __localtry_unlock_irq(lock)			__local_unlock(lock)
+#define __localtry_unlock_irqrestore(lock, flags)	__local_unlock_irqrestore(lock, flags)
+
+#define __localtry_trylock(lock)				\
+	({							\
+		int __locked;					\
+								\
+		if (in_nmi() | in_hardirq()) {			\
+			__locked = 0;				\
+		} else {					\
+			migrate_disable();			\
+			__locked = spin_trylock(this_cpu_ptr((lock)));	\
+			if (!__locked)				\
+				migrate_enable();		\
+		}						\
+		__locked;					\
+	})
+
+#define __localtry_trylock_irqsave(lock, flags)			\
+	({							\
+		typecheck(unsigned long, flags);		\
+		flags = 0;					\
+		__localtry_trylock(lock);			\
+	})
+
 #endif /* CONFIG_PREEMPT_RT */
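On PREEMPT_RT the new type collapses to the same per-CPU spinlock_t as local_lock_t, and __localtry_trylock() simply refuses callers in NMI or hard-IRQ context, since spinlock_t is a sleeping lock there and not even a trylock is safe from those contexts. The _irqsave variant only zeroes flags to satisfy typecheck(), because IRQs are never actually disabled on RT. Callers must therefore always be prepared for the trylock to fail, even when the lock is uncontended.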