Skip to content

Commit 00eea05

Browse files
kkdwvd authored and Kernel Patches Daemon committed
rqspinlock: Introduce res_spin_trylock
A trylock variant for rqspinlock was missing owing to lack of users in the tree thus far, add one now as it would be needed in subsequent patches. Mark as __must_check and __always_inline. This essentially copies queued_spin_trylock, but doesn't depend on it as rqspinlock compiles down to a TAS when CONFIG_QUEUED_SPINLOCKS=n. Signed-off-by: Kumar Kartikeya Dwivedi <[email protected]>
1 parent 399fdcb commit 00eea05

File tree

1 file changed

+45
-0
lines changed

1 file changed

+45
-0
lines changed

include/asm-generic/rqspinlock.h

Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -217,12 +217,57 @@ static __always_inline void res_spin_unlock(rqspinlock_t *lock)
217217
this_cpu_dec(rqspinlock_held_locks.cnt);
218218
}
219219

220+
/**
221+
* res_spin_trylock - try to acquire a queued spinlock
222+
* @lock: Pointer to queued spinlock structure
223+
*
224+
* Attempts to acquire the lock without blocking. This function should be used
225+
* in contexts where blocking is not allowed (e.g., NMI handlers).
226+
*
227+
* Return:
228+
* * 1 - Lock was acquired successfully.
229+
* * 0 - Lock acquisition failed.
230+
*/
231+
static __must_check __always_inline int res_spin_trylock(rqspinlock_t *lock)
232+
{
233+
int val = atomic_read(&lock->val);
234+
int ret;
235+
236+
if (unlikely(val))
237+
return 0;
238+
239+
ret = likely(atomic_try_cmpxchg_acquire(&lock->val, &val, 1));
240+
if (ret)
241+
grab_held_lock_entry(lock);
242+
return ret;
243+
}
244+
220245
/*
 * Initialize an rqspinlock to the unlocked state; the unlocked
 * representation depends on whether the qspinlock slow path is built in.
 */
#ifndef CONFIG_QUEUED_SPINLOCKS
#define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t){0}; })
#else
#define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; })
#endif
225250

251+
/*
 * Non-blocking acquire: preemption stays disabled only when the lock is
 * taken; on failure the preemption count is restored before returning.
 */
#define raw_res_spin_trylock(lock)				\
	({							\
		int __acquired;					\
								\
		preempt_disable();				\
		__acquired = res_spin_trylock(lock);		\
		if (!__acquired)				\
			preempt_enable();			\
		__acquired;					\
	})
260+
261+
/*
 * IRQ-safe non-blocking acquire: interrupts stay disabled (state saved in
 * @flags) only when the lock is taken; on failure they are restored.
 */
#define raw_res_spin_trylock_irqsave(lock, flags)		\
	({							\
		int __acquired;					\
								\
		local_irq_save(flags);				\
		__acquired = raw_res_spin_trylock(lock);	\
		if (!__acquired)				\
			local_irq_restore(flags);		\
		__acquired;					\
	})
270+
226271
#define raw_res_spin_lock(lock) \
227272
({ \
228273
int __ret; \

0 commit comments

Comments
 (0)