# Adapted from ConcurrentUtils/src/read_write_lock.jl

import UnsafeAtomics

abstract type AbstractReadWriteLock <: Base.AbstractLock end

const NOTLOCKED = UInt64(0)
const NREADERS_INC = UInt64(2)
const WRITELOCK_MASK = UInt64(1)
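
# State encoding: bit 0 of `nreaders_and_writelock` is the exclusive (write)
# lock flag; the remaining bits hold the reader count, advanced in steps of
# NREADERS_INC, so the number of active readers is `state >> 1`.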

const NReadersAndWritelock = UInt64

mutable struct ReadWriteLock <: AbstractReadWriteLock
    @atomic nreaders_and_writelock::NReadersAndWritelock
    # TODO: use condition variables with lock-free notify
    const lock::ReentrantLock
    const cond_read::Threads.Condition
    const cond_write::Threads.Condition
end
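
# Blocked readers wait on `cond_read` for a writer to release the lock;
# blocked writers wait on `cond_write` for readers (or another writer) to
# drain. Both conditions share the same ReentrantLock.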

# Look up a field's byte offset by name.
function fieldoffset_by_name(T, field)
    for idx in 1:fieldcount(T)
        if fieldnames(T)[idx] == field
            return fieldoffset(T, idx)
        end
    end
    error("No such field for $T: $field")
end
const OFFSET_NREADERS_AND_WRITELOCK =
    fieldoffset_by_name(ReadWriteLock, :nreaders_and_writelock)
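
# `pointer_from_objref(rwlock) + OFFSET_NREADERS_AND_WRITELOCK` yields a raw
# pointer to the atomic counter field, which `lock_read`/`unlock_read` use for
# a hardware fetch-and-add via `UnsafeAtomics.modify!`.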

function ReadWriteLock()
    lock = ReentrantLock()
    cond_read = Threads.Condition(lock)
    cond_write = Threads.Condition(lock)
    return ReadWriteLock(NOTLOCKED, lock, cond_read, cond_write)
end

# Not very efficient but lock-free. The defaults make a single acquisition
# attempt without spinning or retrying.
function trylock_read(rwlock::ReadWriteLock; nspins = 1, ntries = 1)
    local ns::Int = 0
    local nt::Int = 0
    while true
        old = @atomic :monotonic rwlock.nreaders_and_writelock
        if iszero(old & WRITELOCK_MASK)
            # Try to acquire reader lock without the responsibility to receive
            # or send the notification:
            old, success = @atomicreplace(
                :acquire_release,
                :monotonic,
                rwlock.nreaders_and_writelock,
                old => old + NREADERS_INC,
            )
            success && return true
            nt += 1
            nt < ntries || return false
        end
        ns += 1
        ns < nspins || return false
    end
end
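
# Illustrative sketch: non-blocking read acquisition with `trylock_read`
# (paired with `unlock_read`, defined below).
#
#     if trylock_read(rwlock)
#         try
#             # ... read shared state ...
#         finally
#             unlock_read(rwlock)
#         end
#     end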

function lock_read(rwlock::ReadWriteLock)

    # Using hardware FAA
    ptr = Ptr{NReadersAndWritelock}(
        pointer_from_objref(rwlock) + OFFSET_NREADERS_AND_WRITELOCK,
    )
    GC.@preserve rwlock begin
        _, n = UnsafeAtomics.modify!(ptr, +, NREADERS_INC, UnsafeAtomics.acq_rel)
    end
    # n = @atomic :acquire_release rwlock.nreaders_and_writelock += NREADERS_INC

    # Fast path: no writer holds the lock, so the increment above already
    # registered this reader.
    if iszero(n & WRITELOCK_MASK)
        return
    end
    # Slow path: a writer holds the lock; wait until it releases.
    lock(rwlock.lock) do
        while true
            local n = @atomic :acquire rwlock.nreaders_and_writelock
            if iszero(n & WRITELOCK_MASK)
                @assert n > 0
                return
            end
            wait(rwlock.cond_read)
        end
    end
end

function unlock_read(rwlock::ReadWriteLock)

    # Using hardware FAA
    ptr = Ptr{NReadersAndWritelock}(
        pointer_from_objref(rwlock) + OFFSET_NREADERS_AND_WRITELOCK,
    )
    GC.@preserve rwlock begin
        _, n = UnsafeAtomics.modify!(ptr, -, NREADERS_INC, UnsafeAtomics.acq_rel)
    end
    # n = @atomic :acquire_release rwlock.nreaders_and_writelock -= NREADERS_INC

    @assert iszero(n & WRITELOCK_MASK)
    # The last reader out wakes a single waiting writer, if any.
    if iszero(n)
        lock(rwlock.lock) do
            notify(rwlock.cond_write; all = false)
        end
    end
    return
end

function Base.trylock(rwlock::ReadWriteLock)
    _, success = @atomicreplace(
        :acquire_release,
        :monotonic,
        rwlock.nreaders_and_writelock,
        NOTLOCKED => WRITELOCK_MASK,
    )
    return success::Bool
end

function Base.lock(rwlock::ReadWriteLock)
    if trylock(rwlock)
        return
    end
    lock(rwlock.lock) do
        while true
            if trylock(rwlock)
                return
            end
            wait(rwlock.cond_write)
        end
    end
end

function Base.unlock(rwlock::ReadWriteLock)
    @assert !iszero(rwlock.nreaders_and_writelock & WRITELOCK_MASK)
    # Clear the write bit, then wake all waiting readers and one waiting writer.
    @atomic :acquire_release rwlock.nreaders_and_writelock &= ~WRITELOCK_MASK
    lock(rwlock.lock) do
        notify(rwlock.cond_read)
        notify(rwlock.cond_write; all = false)
    end
    return
end
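
# Because ReadWriteLock subtypes Base.AbstractLock and implements `lock`,
# `trylock`, and `unlock`, the generic `lock(f, rwlock)` and `Base.@lock`
# forms also work for the exclusive (writer) side.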

###
### High-level APIs
###

# Fallbacks for ordinary locks: read-locking degrades to exclusive locking.
lock_read(lck) = lock(lck)
unlock_read(lck) = unlock(lck)

function lock_read(f, lock)
    lock_read(lock)
    try
        return f()
    finally
        unlock_read(lock)
    end
end
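
# Illustrative usage sketch (assuming this file has been included):
#
#     rwlock = ReadWriteLock()
#
#     # Readers may hold the lock concurrently with one another.
#     lock_read(rwlock) do
#         # ... read shared state ...
#     end
#
#     # Writers take exclusive access through the standard lock interface.
#     lock(rwlock) do
#         # ... mutate shared state ...
#     end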