 import .Base: unsafe_convert, lock, trylock, unlock, islocked, wait, notify, AbstractLock
 
 export SpinLock
-
+public PaddedSpinLock
 # Important Note: these low-level primitives defined here
 #   are typically not for general usage
 
@@ -12,33 +12,68 @@ export SpinLock
 # #########################################
 
 """
-    SpinLock()
+    abstract type AbstractSpinLock <: AbstractLock end
 
-Create a non-reentrant, test-and-test-and-set spin lock.
+A non-reentrant, test-and-test-and-set spin lock.
 Recursive use will result in a deadlock.
 This kind of lock should only be used around code that takes little time
 to execute and does not block (e.g. perform I/O).
 In general, [`ReentrantLock`](@ref) should be used instead.
 
 Each [`lock`](@ref) must be matched with an [`unlock`](@ref).
-If [`!islocked(lck::SpinLock)`](@ref islocked) holds, [`trylock(lck)`](@ref trylock)
+If [`!islocked(lck::AbstractSpinLock)`](@ref islocked) holds, [`trylock(lck)`](@ref trylock)
 succeeds unless there are other tasks attempting to hold the lock "at the same time."
 
 Test-and-test-and-set spin locks are quickest up to about 30ish
 contending threads. If you have more contention than that, different
 synchronization approaches should be considered.
 """
-mutable struct SpinLock <: AbstractLock
+abstract type AbstractSpinLock <: AbstractLock end
+
+"""
+    SpinLock() <: AbstractSpinLock
+
+Spinlocks are not padded, and so may suffer from false sharing.
+See also [`PaddedSpinLock`](@ref).
+
+See the documentation for [`AbstractSpinLock`](@ref) regarding correct usage.
+"""
+mutable struct SpinLock <: AbstractSpinLock
     # we make this much larger than necessary to minimize false-sharing
     @atomic owned::Int
     SpinLock() = new(0)
 end
 
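The `AbstractSpinLock` docstring above spells out the lock/unlock discipline this hierarchy expects. A minimal usage sketch with the existing `SpinLock`; the counter and loop are illustrative and not part of this diff:

```julia
using Base.Threads  # brings SpinLock, @threads, nthreads into scope

const lk = SpinLock()    # non-reentrant: locking it twice from the same task deadlocks
const counter = Ref(0)

@threads for _ in 1:10_000
    lock(lk)             # every lock(...) must be paired with an unlock(...)
    try
        counter[] += 1   # keep the critical section short and non-blocking
    finally
        unlock(lk)
    end
end
```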
+# TODO: Determine the cache line size using e.g., CPUID. Meanwhile, this is correct for most
+# processors.
+const CACHE_LINE_SIZE = 64
+
+"""
+    PaddedSpinLock() <: AbstractSpinLock
+
+PaddedSpinLocks are padded so that each is guaranteed to be on its own cache line, to avoid
+false sharing.
+See also [`SpinLock`](@ref).
+
+See the documentation for [`AbstractSpinLock`](@ref) regarding correct usage.
+"""
+mutable struct PaddedSpinLock <: AbstractSpinLock
+    # we make this much larger than necessary to minimize false-sharing
+    _padding_before::NTuple{max(0, CACHE_LINE_SIZE - sizeof(Int)), UInt8}
+    @atomic owned::Int
+    _padding_after::NTuple{max(0, CACHE_LINE_SIZE - sizeof(Int)), UInt8}
+    function PaddedSpinLock()
+        l = new()
+        @atomic l.owned = 0
+        return l
+    end
+end
+
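A sketch of the use case the new `PaddedSpinLock` targets: one lock per shard, so that hot locks used by different threads never share a cache line. The shard layout and counters are illustrative; note the counters themselves could still false-share unless padded similarly:

```julia
const NSHARDS = Threads.nthreads()

# PaddedSpinLock is `public` but not exported, so qualify it with the module name.
const locks  = [Threads.PaddedSpinLock() for _ in 1:NSHARDS]
const counts = zeros(Int, NSHARDS)

Threads.@threads for i in 1:100_000
    s = mod1(i, NSHARDS)   # pick a shard
    lock(locks[s])         # contention on shard s stays on shard s's own cache line
    try
        counts[s] += 1
    finally
        unlock(locks[s])
    end
end
```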
 # Note: this cannot assert that the lock is held by the correct thread, because we do not
 # track which thread locked it. Users beware.
-Base.assert_havelock(l::SpinLock) = islocked(l) ? nothing : Base.concurrency_violation()
+Base.assert_havelock(l::AbstractSpinLock) = islocked(l) ? nothing : Base.concurrency_violation()
 
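As the note says, `assert_havelock` can only check that *some* task holds the spin lock, not that the caller does. A hedged sketch of how an internal helper might still use it to document its locking contract; the helper itself is hypothetical:

```julia
# Hypothetical helper: callers are expected to already hold `lk`.
function unsafe_bump!(counter::Ref{Int}, lk::Threads.SpinLock)
    # Throws a ConcurrencyViolationError if `lk` is not locked at all;
    # it cannot detect a *different* task holding it.
    Base.assert_havelock(lk)
    counter[] += 1
    return counter[]
end
```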
-function lock(l::SpinLock)
+function lock(l::AbstractSpinLock)
     while true
         if @inline trylock(l)
             return
@@ -49,7 +84,7 @@ function lock(l::SpinLock)
     end
 end
 
-function trylock(l::SpinLock)
+function trylock(l::AbstractSpinLock)
     if l.owned == 0
         GC.disable_finalizers()
         p = @atomicswap :acquire l.owned = 1
@@ -61,7 +96,7 @@ function trylock(l::SpinLock)
     return false
 end
 
-function unlock(l::SpinLock)
+function unlock(l::AbstractSpinLock)
     if (@atomicswap :release l.owned = 0) == 0
         error("unlock count must match lock count")
     end
@@ -70,6 +105,6 @@ function unlock(l::SpinLock)
     return
 end
 
-function islocked(l::SpinLock)
+function islocked(l::AbstractSpinLock)
     return (@atomic :monotonic l.owned) != 0
 end
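For completeness, a sketch of the non-blocking path these methods provide: `trylock` succeeds only when the `owned` flag was 0 and the atomic swap set it to 1, so a caller can fall back to other work instead of spinning. The function below is illustrative:

```julia
function try_bump!(counter::Ref{Int}, lk::Threads.SpinLock)
    if trylock(lk)        # acquired: finalizers stay disabled until the matching unlock
        try
            counter[] += 1
            return true
        finally
            unlock(lk)    # resets `owned` to 0 and re-enables finalizers
        end
    end
    return false          # lock was held; caller may retry later or do other work
end
```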