Commit 9b8ed3d
refactor: Add clang thread safety annotations to EventLoop
Add basic thread safety annotations to EventLoop. Usage could be expanded and deepened, but this puts the basic functionality in place. Use of annotations was discussed in #129 (comment)
1 parent 52256e7 commit 9b8ed3d
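
Background: with these annotations, clang's -Wthread-safety analysis checks locking discipline at compile time instead of relying on runtime asserts. A minimal sketch of the idea, not part of the commit: the Example class and its members are invented for illustration, and it assumes the MP_* macros and Mutex class added to include/mp/util.h below, a clang build with -Wthread-safety enabled, and that the new types live in the mp namespace like the rest of util.h.

#include <mp/util.h> // MP_* macros and Mutex from this commit

using namespace mp; // assuming the new types are in the mp namespace

class Example
{
public:
    void good()
    {
        m_mutex.lock();   // MP_ACQUIRE: the analysis records that m_mutex is held
        m_value += 1;     // OK: guarded member accessed under its mutex
        helper();         // OK: MP_REQUIRES(m_mutex) contract is satisfied
        m_mutex.unlock(); // MP_RELEASE: capability dropped
    }

    void bad()
    {
        m_value += 1; // clang warns, roughly: writing variable 'm_value'
                      // requires holding mutex 'm_mutex' exclusively
        helper();     // clang warns: calling function 'helper' requires
                      // holding mutex 'm_mutex' exclusively
    }

private:
    void helper() MP_REQUIRES(m_mutex) {}

    Mutex m_mutex;
    int m_value MP_GUARDED_BY(m_mutex) = 0;
};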

File tree

4 files changed, +70 -30 lines changed
include/mp/proxy-io.h

Lines changed: 6 additions & 6 deletions

@@ -189,10 +189,10 @@ class EventLoop
     //! is important that ProxyServer::m_impl destructors do not run on the
     //! eventloop thread because they may need it to do I/O if they perform
     //! other IPC calls.
-    void startAsyncThread(std::unique_lock<std::mutex>& lock);
+    void startAsyncThread() MP_REQUIRES(m_mutex);

     //! Check if loop should exit.
-    bool done(std::unique_lock<std::mutex>& lock) const;
+    bool done() const MP_REQUIRES(m_mutex);

     Logger log()
     {
@@ -215,10 +215,10 @@ class EventLoop
     std::thread m_async_thread;

     //! Callback function to run on event loop thread during post() or sync() call.
-    kj::Function<void()>* m_post_fn = nullptr;
+    kj::Function<void()>* m_post_fn MP_GUARDED_BY(m_mutex) = nullptr;

     //! Callback functions to run on async thread.
-    CleanupList m_async_fns;
+    CleanupList m_async_fns MP_GUARDED_BY(m_mutex);

     //! Pipe read handle used to wake up the event loop thread.
     int m_wait_fd = -1;
@@ -228,11 +228,11 @@

     //! Number of clients holding references to ProxyServerBase objects that
     //! reference this event loop.
-    int m_num_clients = 0;
+    int m_num_clients MP_GUARDED_BY(m_mutex) = 0;

     //! Mutex and condition variable used to post tasks to event loop and async
     //! thread.
-    std::mutex m_mutex;
+    Mutex m_mutex;
     std::condition_variable m_cv;

     //! Capnp IO context.
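
Note on the signature changes above: startAsyncThread() and done() previously took a std::unique_lock<std::mutex>& purely as proof that the caller held m_mutex, verified by runtime asserts. MP_REQUIRES(m_mutex) moves that contract to compile time. A hypothetical member function, invented here for illustration and not part of the commit, shows what the analysis now enforces:

void EventLoop::exampleCheck() // hypothetical helper, illustration only
{
    // done();             // would warn: calling function 'done' requires
                           // holding mutex 'm_mutex' exclusively
    Lock lock(m_mutex);    // scoped acquire, visible to the analysis
    if (done()) {          // OK: MP_REQUIRES(m_mutex) is satisfied
        startAsyncThread(); // OK: the same held capability covers this call
    }
}                          // ~Lock releases m_mutex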

include/mp/proxy.h

Lines changed: 2 additions & 2 deletions

@@ -54,7 +54,7 @@ inline void CleanupRun(CleanupList& fns) {
 class EventLoopRef
 {
 public:
-    explicit EventLoopRef(EventLoop& loop, std::unique_lock<std::mutex>* lock = nullptr);
+    explicit EventLoopRef(EventLoop& loop, Lock* lock = nullptr);
     EventLoopRef(EventLoopRef&& other) noexcept : m_loop(other.m_loop) { other.m_loop = nullptr; }
     EventLoopRef(const EventLoopRef&) = delete;
     EventLoopRef& operator=(const EventLoopRef&) = delete;
@@ -65,7 +65,7 @@ class EventLoopRef
     bool reset();

     EventLoop* m_loop{nullptr};
-    std::unique_lock<std::mutex>* m_lock{nullptr};
+    Lock* m_lock{nullptr};
 };

 //! Context data associated with proxy client and server classes.

include/mp/util.h

Lines changed: 40 additions & 0 deletions

@@ -6,13 +6,15 @@
 #define MP_UTIL_H

 #include <capnp/schema.h>
+#include <cassert>
 #include <cstddef>
 #include <functional>
 #include <future>
 #include <kj/common.h>
 #include <kj/exception.h>
 #include <kj/string-tree.h>
 #include <memory>
+#include <mutex>
 #include <string.h>
 #include <string>
 #include <tuple>
@@ -145,6 +147,44 @@ struct PtrOrValue {
     T* operator->() const { return &**this; }
 };

+// Annotated mutex and lock class (https://clang.llvm.org/docs/ThreadSafetyAnalysis.html)
+#if defined(__clang__) && (!defined(SWIG))
+#define MP_TSA(x) __attribute__((x))
+#else
+#define MP_TSA(x) // no-op
+#endif
+
+#define MP_CAPABILITY(x) MP_TSA(capability(x))
+#define MP_SCOPED_CAPABILITY MP_TSA(scoped_lockable)
+#define MP_REQUIRES(x) MP_TSA(requires_capability(x))
+#define MP_ACQUIRE(...) MP_TSA(acquire_capability(__VA_ARGS__))
+#define MP_RELEASE(...) MP_TSA(release_capability(__VA_ARGS__))
+#define MP_ASSERT_CAPABILITY(x) MP_TSA(assert_capability(x))
+#define MP_GUARDED_BY(x) MP_TSA(guarded_by(x))
+
+class MP_CAPABILITY("mutex") Mutex {
+public:
+    void lock() MP_ACQUIRE() { m_mutex.lock(); }
+    void unlock() MP_RELEASE() { m_mutex.unlock(); }
+
+    std::mutex m_mutex;
+};
+
+class MP_SCOPED_CAPABILITY Lock {
+public:
+    explicit Lock(Mutex& m) MP_ACQUIRE(m) : m_lock(m.m_mutex) {}
+    ~Lock() MP_RELEASE() {}
+    void unlock() MP_RELEASE() { m_lock.unlock(); }
+    void lock() MP_ACQUIRE() { m_lock.lock(); }
+    void assert_locked(Mutex& mutex) MP_ASSERT_CAPABILITY() MP_ASSERT_CAPABILITY(mutex)
+    {
+        assert(m_lock.mutex() == &mutex.m_mutex);
+        assert(m_lock);
+    }
+
+    std::unique_lock<std::mutex> m_lock;
+};
+
 //! Analog to std::lock_guard that unlocks instead of locks.
 template <typename Lock>
 struct UnlockGuard
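
Usage sketch for the new wrappers, with a Counter struct invented for illustration: Lock is a scoped capability in the style of std::lock_guard, while assert_locked() supports functions that receive an already-held lock from the caller, pairing a runtime check with an MP_ASSERT_CAPABILITY hint so the analysis treats the mutex as held. This is the pattern EventLoopRef uses in src/mp/proxy.cpp below.

struct Counter { // hypothetical type, illustration only
    Mutex m_mutex;
    int m_value MP_GUARDED_BY(m_mutex) = 0;

    void increment()
    {
        Lock lock(m_mutex); // constructor is MP_ACQUIRE(m): capability acquired
        m_value += 1;       // OK: guarded access under the lock
    }                       // destructor is MP_RELEASE(): capability released

    void incrementLocked(Lock& lock)
    {
        lock.assert_locked(m_mutex); // runtime check plus analysis hint
        m_value += 1;                // OK after the assertion
    }
};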

src/mp/proxy.cpp

Lines changed: 22 additions & 22 deletions

@@ -49,9 +49,10 @@ void LoggingErrorHandler::taskFailed(kj::Exception&& exception)
     m_loop.log() << "Uncaught exception in daemonized task.";
 }

-EventLoopRef::EventLoopRef(EventLoop& loop, std::unique_lock<std::mutex>* lock) : m_loop(&loop), m_lock(lock)
+EventLoopRef::EventLoopRef(EventLoop& loop, Lock* lock) : m_loop(&loop), m_lock(lock)
 {
     auto loop_lock{PtrOrValue{m_lock, m_loop->m_mutex}};
+    loop_lock->assert_locked(m_loop->m_mutex);
     m_loop->m_num_clients += 1;
 }

@@ -61,9 +62,10 @@ bool EventLoopRef::reset()
     if (auto* loop{m_loop}) {
         m_loop = nullptr;
         auto loop_lock{PtrOrValue{m_lock, loop->m_mutex}};
+        loop_lock->assert_locked(loop->m_mutex);
         assert(loop->m_num_clients > 0);
         loop->m_num_clients -= 1;
-        if (loop->done(*loop_lock)) {
+        if (loop->done()) {
             done = true;
             loop->m_cv.notify_all();
             int post_fd{loop->m_post_fd};
@@ -134,17 +136,17 @@ Connection::~Connection()
         m_sync_cleanup_fns.pop_front();
     }
     while (!m_async_cleanup_fns.empty()) {
-        const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
+        const Lock lock(m_loop->m_mutex);
         m_loop->m_async_fns.emplace_back(std::move(m_async_cleanup_fns.front()));
         m_async_cleanup_fns.pop_front();
     }
-    std::unique_lock<std::mutex> lock(m_loop->m_mutex);
-    m_loop->startAsyncThread(lock);
+    Lock lock(m_loop->m_mutex);
+    m_loop->startAsyncThread();
 }

 CleanupIt Connection::addSyncCleanup(std::function<void()> fn)
 {
-    const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
+    const Lock lock(m_loop->m_mutex);
     // Add cleanup callbacks to the front of list, so sync cleanup functions run
     // in LIFO order. This is a good approach because sync cleanup functions are
     // added as client objects are created, and it is natural to clean up
@@ -158,13 +160,13 @@ CleanupIt Connection::addSyncCleanup(std::function<void()> fn)

 void Connection::removeSyncCleanup(CleanupIt it)
 {
-    const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
+    const Lock lock(m_loop->m_mutex);
     m_sync_cleanup_fns.erase(it);
 }

 void Connection::addAsyncCleanup(std::function<void()> fn)
 {
-    const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
+    const Lock lock(m_loop->m_mutex);
     // Add async cleanup callbacks to the back of the list. Unlike the sync
     // cleanup list, this list order is more significant because it determines
     // the order server objects are destroyed when there is a sudden disconnect,
@@ -200,7 +202,7 @@ EventLoop::EventLoop(const char* exe_name, LogFn log_fn, void* context)
 EventLoop::~EventLoop()
 {
     if (m_async_thread.joinable()) m_async_thread.join();
-    const std::lock_guard<std::mutex> lock(m_mutex);
+    const Lock lock(m_mutex);
     KJ_ASSERT(m_post_fn == nullptr);
     KJ_ASSERT(m_async_fns.empty());
     KJ_ASSERT(m_wait_fd == -1);
@@ -225,12 +227,12 @@ void EventLoop::loop()
     for (;;) {
         const size_t read_bytes = wait_stream->read(&buffer, 0, 1).wait(m_io_context.waitScope);
         if (read_bytes != 1) throw std::logic_error("EventLoop wait_stream closed unexpectedly");
-        std::unique_lock<std::mutex> lock(m_mutex);
+        Lock lock(m_mutex);
         if (m_post_fn) {
            Unlock(lock, *m_post_fn);
            m_post_fn = nullptr;
            m_cv.notify_all();
-        } else if (done(lock)) {
+        } else if (done()) {
            // Intentionally do not break if m_post_fn was set, even if done()
            // would return true, to ensure that the EventLoopRef write(post_fd)
            // call always succeeds and the loop does not exit between the time
@@ -243,7 +245,7 @@
     log() << "EventLoop::loop bye.";
     wait_stream = nullptr;
     KJ_SYSCALL(::close(post_fd));
-    const std::unique_lock<std::mutex> lock(m_mutex);
+    const Lock lock(m_mutex);
     m_wait_fd = -1;
     m_post_fd = -1;
 }
@@ -254,27 +256,27 @@ void EventLoop::post(kj::Function<void()> fn)
         fn();
         return;
     }
-    std::unique_lock<std::mutex> lock(m_mutex);
+    Lock lock(m_mutex);
     EventLoopRef ref(*this, &lock);
-    m_cv.wait(lock, [this] { return m_post_fn == nullptr; });
+    m_cv.wait(lock.m_lock, [this]() MP_REQUIRES(m_mutex) { return m_post_fn == nullptr; });
     m_post_fn = &fn;
     int post_fd{m_post_fd};
     Unlock(lock, [&] {
         char buffer = 0;
         KJ_SYSCALL(write(post_fd, &buffer, 1));
     });
-    m_cv.wait(lock, [this, &fn] { return m_post_fn != &fn; });
+    m_cv.wait(lock.m_lock, [this, &fn]() MP_REQUIRES(m_mutex) { return m_post_fn != &fn; });
 }

-void EventLoop::startAsyncThread(std::unique_lock<std::mutex>& lock)
+void EventLoop::startAsyncThread()
 {
     if (m_async_thread.joinable()) {
         // Notify to wake up the async thread if it is already running.
         m_cv.notify_all();
     } else if (!m_async_fns.empty()) {
         m_async_thread = std::thread([this] {
-            std::unique_lock<std::mutex> lock(m_mutex);
-            while (!done(lock)) {
+            Lock lock(m_mutex);
+            while (!done()) {
                 if (!m_async_fns.empty()) {
                     EventLoopRef ref{*this, &lock};
                     const std::function<void()> fn = std::move(m_async_fns.front());
@@ -289,17 +291,15 @@ void EventLoop::startAsyncThread(std::unique_lock<std::mutex>& lock)
                     // Continue without waiting in case there are more async_fns
                     continue;
                 }
-                m_cv.wait(lock);
+                m_cv.wait(lock.m_lock);
             }
         });
     }
 }

-bool EventLoop::done(std::unique_lock<std::mutex>& lock) const
+bool EventLoop::done() const
 {
     assert(m_num_clients >= 0);
-    assert(lock.owns_lock());
-    assert(lock.mutex() == &m_mutex);
     return m_num_clients == 0 && m_async_fns.empty();
 }
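
A note on the condition-variable changes above: std::condition_variable::wait() needs the underlying std::unique_lock, so the annotated Lock exposes it as the public m_lock member, and each wait predicate is annotated with MP_REQUIRES(m_mutex) because it runs while the mutex is held. A standalone sketch of the pattern, with cv, ready, and waitForReady invented for illustration:

#include <mp/util.h> // Mutex, Lock, MP_* macros from this commit
#include <condition_variable>

using namespace mp; // assuming the new types are in the mp namespace

std::condition_variable cv;
Mutex mutex;
bool ready MP_GUARDED_BY(mutex) = false; // hypothetical flag

void waitForReady()
{
    Lock lock(mutex);
    // Pass the raw std::unique_lock to wait(); the predicate is annotated
    // because it is evaluated with the mutex held.
    cv.wait(lock.m_lock, [&]() MP_REQUIRES(mutex) { return ready; });
}

wait() internally releases and reacquires the mutex, which the analysis cannot see when given the raw std::unique_lock; that is harmless here, since the capability is in fact held whenever the annotated code actually runs.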
