@@ -21,6 +21,7 @@ fn should_be_zero(res int) {
2121// SpinLock is a mutual exclusion lock that busy-waits (spins) when locked.
2222// When one thread holds the lock, any other thread attempting to acquire it
2323// will loop repeatedly until the lock becomes available.
24+ @[noinit]
2425pub struct SpinLock {
2526mut :
2627 locked u8 // Lock state: 0 = unlocked, 1 = locked
@@ -34,6 +35,9 @@ pub fn new_spin_lock() &SpinLock {
3435 }
3536 // Ensure initialization visibility across threads
3637 C.atomic_thread_fence (C.memory_order_release)
38+ $if valgrind ? {
39+ C.ANNOTATE_RWLOCK_CREATE (& the_lock.locked)
40+ }
3741 return the_lock
3842}
3943
@@ -54,6 +58,9 @@ pub fn (s &SpinLock) lock() {
5458 // Succeeds if current value matches expected (0),
5559 // then swaps to locked (1)
5660 if C.atomic_compare_exchange_weak_byte (& s.locked, & expected, 1 ) {
61+ $if valgrind ? {
62+ C.ANNOTATE_RWLOCK_ACQUIRED (& s.locked, 1 ) // 1 = write lock
63+ }
5764 // Prevent critical section reordering
5865 C.atomic_thread_fence (C.memory_order_acquire)
5966 return
@@ -71,18 +78,47 @@ pub fn (s &SpinLock) lock() {
7178 C.cpu_relax ()
7279 }
7380
74- // Refresh lock state before next attempt
75- expected = C.atomic_load_byte (& s.locked)
81+ expected = 0
82+ }
83+ }
84+
// try_lock attempts to acquire the spin lock and returns immediately.
// It returns true when the lock was acquired, and false when the lock
// was already held by another thread. It never blocks or spins.
@[inline]
pub fn (s &SpinLock) try_lock() bool {
	// Cheap pre-check: bail out early when the lock is visibly held, so a
	// caller doing `while !try_lock()` does not hammer the cache line with
	// CAS attempts.
	// TODO: make a `relaxed` load
	if C.atomic_load_byte(&s.locked) != 0 {
		return false
	}
	mut want := u8(0)
	// Single CAS attempt: succeed only if the state is still 0 (unlocked).
	if !C.atomic_compare_exchange_weak_byte(&s.locked, &want, 1) {
		return false
	}
	$if valgrind ? {
		C.ANNOTATE_RWLOCK_ACQUIRED(&s.locked, 1)
	}
	// Prevent critical-section reads/writes from being reordered before the acquire.
	C.atomic_thread_fence(C.memory_order_acquire)
	return true
}
78104
// unlock releases the spin lock, making it available to other threads.
// IMPORTANT: Must only be called by the thread that currently holds the lock;
// no ownership check is performed, so a stray unlock corrupts the lock state.
@[inline]
pub fn (s &SpinLock) unlock() {
	$if valgrind ? {
		// Notify Helgrind before the actual release, so the annotation is
		// ordered before any subsequent acquisition of the lock.
		C.ANNOTATE_RWLOCK_RELEASED(&s.locked, 1) // 1 = write lock
	}
	// Ensure critical section completes before release
	C.atomic_thread_fence(C.memory_order_release)

	// Atomically reset to unlocked state
	C.atomic_store_byte(&s.locked, 0)
}
118+
// destroy frees the resources associated with the spin lock instance.
// It only has an effect in `-d valgrind` builds, where it tears down the
// Helgrind bookkeeping created in new_spin_lock; the lock must not be
// used again after calling destroy.
pub fn (s &SpinLock) destroy() {
	$if valgrind ? {
		C.ANNOTATE_RWLOCK_DESTROY(&s.locked)
	}
}
0 commit comments