
#include <atomic>
#include <memory>
#include <vector>

namespace atk
{

// Atomic shared_ptr wrapper compatible with Apple libc++ (which lacks std::atomic<shared_ptr>).
// Uses a spinlock for synchronization - safe for infrequent updates from UI thread
// with frequent reads from audio thread.
//
// THREAD SAFETY:
// - load() may be called concurrently from any number of reader threads
// - store()/exchange() must come from a single writer thread at a time (the UI
//   thread): the retained-value list is writer-private and NOT spinlock-protected
// - Readers spin briefly if a write is in progress (nanoseconds)
//
// DESTRUCTION SAFETY:
// - Old values are held by the writer until a later store()/exchange()
// - This ensures destruction happens on the writer thread, not a reader
//
// USAGE CONTRACT:
// - Writers should be infrequent (UI thread updates)
// - Readers can be frequent (audio thread)
//
// NOTE: memory_order parameters are accepted for API compatibility but ignored.
template <typename T>
class AtomicSharedPtr
{
public:
    AtomicSharedPtr() = default;

    explicit AtomicSharedPtr(std::shared_ptr<T> p)
        : ptr(std::move(p))
    {
    }

    ~AtomicSharedPtr() = default;

    // Non-copyable and non-movable
    AtomicSharedPtr(const AtomicSharedPtr&) = delete;
    AtomicSharedPtr& operator=(const AtomicSharedPtr&) = delete;
    AtomicSharedPtr(AtomicSharedPtr&&) = delete;
    AtomicSharedPtr& operator=(AtomicSharedPtr&&) = delete;

    // Returns a copy of the current value. Safe on reader (audio) threads.
    std::shared_ptr<T> load([[maybe_unused]] std::memory_order order = std::memory_order_acquire) const
    {
        lock();
        auto result = ptr;
        unlock();
        return result;
    }

    // Replaces the current value, discarding the previous one. Writer thread only.
    void store(std::shared_ptr<T> desired, [[maybe_unused]] std::memory_order order = std::memory_order_release)
    {
        (void)exchange(std::move(desired));
    }

    // Replaces the current value and returns the previous one. Writer thread only.
    [[nodiscard]] std::shared_ptr<T>
    exchange(std::shared_ptr<T> desired, [[maybe_unused]] std::memory_order order = std::memory_order_acq_rel)
    {
        lock();
        auto old = std::move(ptr);
        ptr = std::move(desired);
        unlock();

        // Drop retained entries we solely own (use_count == 1): no reader still
        // holds a copy, so destroying them here on the writer thread is safe.
        auto it = retained.begin();
        while (it != retained.end())
            if (it->use_count() == 1)
                it = retained.erase(it);
            else
                ++it;

        // Keep a copy of the old value alive so that, even if the caller discards
        // the returned pointer while a reader still holds one, final destruction
        // happens on this (writer) thread during a later store()/exchange().
        if (old)
            retained.push_back(old);

        return old;
    }

private:
    // Acquire the spinlock: test-and-set, backing off with relaxed read-only
    // polling so we don't hammer the cache line while the lock is held.
    void lock() const
    {
        bool expected = false;
        while (!spinlock.compare_exchange_weak(expected, true, std::memory_order_acquire))
        {
            expected = false;
            while (spinlock.load(std::memory_order_relaxed))
                ;
        }
    }

    void unlock() const
    {
        spinlock.store(false, std::memory_order_release);
    }

    std::shared_ptr<T> ptr;
    // mutable so const load() can take the lock; no heap allocation needed.
    mutable std::atomic<bool> spinlock{false};

    // Prevents destruction on a reader thread by keeping old values alive.
    // Touched only by the (single) writer; NOT protected by the spinlock.
    std::vector<std::shared_ptr<T>> retained;
};

} // namespace atk