Skip to content

Commit d1d43ab

Browse files
authored
sync: fix spin lock, add destroy() and try_lock(), add valgrind annotate support (vlang#24798)
1 parent 6b45931 commit d1d43ab

File tree

3 files changed

+75
-9
lines changed

3 files changed

+75
-9
lines changed

vlib/sync/spinlock_test.v

Lines changed: 27 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,22 +1,42 @@
11
import sync
2+
import rand
3+
import time
24

35
fn test_spinlock() {
46
mut counter := 0
57
mut s := sync.new_spin_lock()
8+
defer {
9+
s.destroy()
10+
}
611
num_threads := 10
12+
iterations := 10
713
mut wg := sync.new_waitgroup()
814
wg.add(num_threads)
915

1016
for _ in 0 .. num_threads {
11-
spawn fn (mut wg sync.WaitGroup, s &sync.SpinLock, counter_ref &int) {
12-
defer {
17+
spawn fn (mut wg sync.WaitGroup, s &sync.SpinLock, counter_ref &int, iterations int) {
18+
for _ in 0 .. iterations {
19+
s.lock()
20+
21+
unsafe {
22+
tmp := *counter_ref
23+
randval := rand.intn(100) or { 1 }
24+
time.sleep(randval * time.nanosecond)
25+
26+
(*counter_ref) = tmp + 1
27+
}
1328
s.unlock()
14-
wg.done()
1529
}
16-
s.lock()
17-
(*counter_ref)++
18-
}(mut wg, s, &counter)
30+
wg.done()
31+
}(mut wg, s, &counter, iterations)
1932
}
2033
wg.wait()
21-
assert counter == num_threads
34+
assert counter == num_threads * iterations
35+
36+
// test try_lock()
37+
s.lock()
38+
assert s.try_lock() == false
39+
s.unlock()
40+
assert s.try_lock() == true
41+
assert s.try_lock() == false
2242
}

vlib/sync/stdatomic/1.declarations.c.v

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,3 +109,13 @@ fn C.atomic_fetch_sub_u64(voidptr, u64) u64
109109

110110
fn C.atomic_thread_fence(int)
111111
fn C.cpu_relax()
112+
113+
fn C.ANNOTATE_RWLOCK_CREATE(voidptr)
114+
fn C.ANNOTATE_RWLOCK_ACQUIRED(voidptr, int)
115+
fn C.ANNOTATE_RWLOCK_RELEASED(voidptr, int)
116+
fn C.ANNOTATE_RWLOCK_DESTROY(voidptr)
117+
118+
$if valgrind ? {
119+
#flag -I/usr/include/valgrind
120+
#include <valgrind/helgrind.h>
121+
}

vlib/sync/sync.c.v

Lines changed: 38 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ fn should_be_zero(res int) {
2121
// SpinLock is a mutual exclusion lock that busy-waits (spins) when locked.
2222
// When one thread holds the lock, any other thread attempting to acquire it
2323
// will loop repeatedly until the lock becomes available.
24+
@[noinit]
2425
pub struct SpinLock {
2526
mut:
2627
locked u8 // Lock state: 0 = unlocked, 1 = locked
@@ -34,6 +35,9 @@ pub fn new_spin_lock() &SpinLock {
3435
}
3536
// Ensure initialization visibility across threads
3637
C.atomic_thread_fence(C.memory_order_release)
38+
$if valgrind ? {
39+
C.ANNOTATE_RWLOCK_CREATE(&the_lock.locked)
40+
}
3741
return the_lock
3842
}
3943

@@ -54,6 +58,9 @@ pub fn (s &SpinLock) lock() {
5458
// Succeeds if current value matches expected (0),
5559
// then swaps to locked (1)
5660
if C.atomic_compare_exchange_weak_byte(&s.locked, &expected, 1) {
61+
$if valgrind ? {
62+
C.ANNOTATE_RWLOCK_ACQUIRED(&s.locked, 1) // 1 = write lock
63+
}
5764
// Prevent critical section reordering
5865
C.atomic_thread_fence(C.memory_order_acquire)
5966
return
@@ -71,18 +78,47 @@ pub fn (s &SpinLock) lock() {
7178
C.cpu_relax()
7279
}
7380

74-
// Refresh lock state before next attempt
75-
expected = C.atomic_load_byte(&s.locked)
81+
expected = 0
82+
}
83+
}
84+
85+
// try_lock tries to lock the spin lock instance and returns immediately.
86+
// If the spin lock was already locked, it will return false.
87+
@[inline]
88+
pub fn (s &SpinLock) try_lock() bool {
89+
// First do a relaxed load to check if lock is free in order to prevent
90+
// unnecessary cache misses if someone does while(!try_lock())
91+
// TODO: make a `relaxed` load
92+
if C.atomic_load_byte(&s.locked) == 0 {
93+
mut expected := u8(0)
94+
if C.atomic_compare_exchange_weak_byte(&s.locked, &expected, 1) {
95+
$if valgrind ? {
96+
C.ANNOTATE_RWLOCK_ACQUIRED(&s.locked, 1)
97+
}
98+
C.atomic_thread_fence(C.memory_order_acquire)
99+
return true
100+
}
76101
}
102+
return false
77103
}
78104

79105
// unlock releases the spin lock, making it available to other threads.
80106
// IMPORTANT: Must only be called by the thread that currently holds the lock.
81107
@[inline]
82108
pub fn (s &SpinLock) unlock() {
109+
$if valgrind ? {
110+
C.ANNOTATE_RWLOCK_RELEASED(&s.locked, 1) // 1 = write lock
111+
}
83112
// Ensure critical section completes before release
84113
C.atomic_thread_fence(C.memory_order_release)
85114

86115
// Atomically reset to unlocked state
87116
C.atomic_store_byte(&s.locked, 0)
88117
}
118+
119+
// destroy frees the resources associated with the spin lock instance.
120+
pub fn (s &SpinLock) destroy() {
121+
$if valgrind ? {
122+
C.ANNOTATE_RWLOCK_DESTROY(&s.locked)
123+
}
124+
}

0 commit comments

Comments
 (0)