@@ -65,7 +65,7 @@ bool EventLoopRef::reset()
     return done;
 }
 
-ProxyContext::ProxyContext(Connection* connection) : connection(connection), loop{&connection->m_loop} {}
+ProxyContext::ProxyContext(Connection* connection) : connection(connection), loop{*connection->m_loop} {}
 
 Connection::~Connection()
 {
@@ -122,18 +122,17 @@ Connection::~Connection()
         m_sync_cleanup_fns.pop_front();
     }
     while (!m_async_cleanup_fns.empty()) {
-        const std::unique_lock<std::mutex> lock(m_loop.m_mutex);
-        m_loop.m_async_fns.emplace_back(std::move(m_async_cleanup_fns.front()));
+        const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
+        m_loop->m_async_fns.emplace_back(std::move(m_async_cleanup_fns.front()));
         m_async_cleanup_fns.pop_front();
     }
-    std::unique_lock<std::mutex> lock(m_loop.m_mutex);
-    m_loop.startAsyncThread(lock);
-    m_loop.removeClient(lock);
+    std::unique_lock<std::mutex> lock(m_loop->m_mutex);
+    m_loop->startAsyncThread(lock);
 }
 
 CleanupIt Connection::addSyncCleanup(std::function<void()> fn)
 {
-    const std::unique_lock<std::mutex> lock(m_loop.m_mutex);
+    const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
     // Add cleanup callbacks to the front of list, so sync cleanup functions run
     // in LIFO order. This is a good approach because sync cleanup functions are
     // added as client objects are created, and it is natural to clean up
@@ -147,13 +146,13 @@ CleanupIt Connection::addSyncCleanup(std::function<void()> fn)
 
 void Connection::removeSyncCleanup(CleanupIt it)
 {
-    const std::unique_lock<std::mutex> lock(m_loop.m_mutex);
+    const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
     m_sync_cleanup_fns.erase(it);
 }
 
 void Connection::addAsyncCleanup(std::function<void()> fn)
 {
-    const std::unique_lock<std::mutex> lock(m_loop.m_mutex);
+    const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
     // Add async cleanup callbacks to the back of the list. Unlike the sync
     // cleanup list, this list order is more significant because it determines
     // the order server objects are destroyed when there is a sudden disconnect,
@@ -244,7 +243,7 @@ void EventLoop::post(const std::function<void()>& fn)
         return;
     }
     std::unique_lock<std::mutex> lock(m_mutex);
-    addClient(lock);
+    EventLoopRef ref(*this, &lock);
     m_cv.wait(lock, [this] { return m_post_fn == nullptr; });
     m_post_fn = &fn;
     int post_fd{m_post_fd};
@@ -253,20 +252,22 @@ void EventLoop::post(const std::function<void()>& fn)
         KJ_SYSCALL(write(post_fd, &buffer, 1));
     });
     m_cv.wait(lock, [this, &fn] { return m_post_fn != &fn; });
-    removeClient(lock);
 }
 
 void EventLoop::addClient(std::unique_lock<std::mutex>& lock) { m_num_clients += 1; }
 
 bool EventLoop::removeClient(std::unique_lock<std::mutex>& lock)
 {
+    assert(m_num_clients > 0);
     m_num_clients -= 1;
     if (done(lock)) {
         m_cv.notify_all();
         int post_fd{m_post_fd};
         lock.unlock();
         char buffer = 0;
         KJ_SYSCALL(write(post_fd, &buffer, 1)); // NOLINT(bugprone-suspicious-semicolon)
+        // Do not try to relock `lock` after writing, because the event loop
+        // could wake up and destroy itself and the mutex might no longer exist.
         return true;
     }
     return false;
@@ -275,20 +276,25 @@ bool EventLoop::removeClient(std::unique_lock<std::mutex>& lock)
 void EventLoop::startAsyncThread(std::unique_lock<std::mutex>& lock)
 {
     if (m_async_thread.joinable()) {
+        // Notify to wake up the async thread if it is already running.
         m_cv.notify_all();
     } else if (!m_async_fns.empty()) {
         m_async_thread = std::thread([this] {
             std::unique_lock<std::mutex> lock(m_mutex);
-            while (true) {
+            while (!done(lock)) {
                 if (!m_async_fns.empty()) {
-                    addClient(lock);
+                    EventLoopRef ref{*this, &lock};
                     const std::function<void()> fn = std::move(m_async_fns.front());
                     m_async_fns.pop_front();
                     Unlock(lock, fn);
-                    if (removeClient(lock)) break;
+                    // Reset ref and break if that returns true instead of
+                    // passively letting ref go out of scope. This is important
+                    // because the ref destructor would leave m_mutex unlocked
+                    // when done() returns true, causing undefined behavior if
+                    // the loop continued to execute.
+                    if (ref.reset()) break;
+                    // Continue without waiting in case there are more async_fns
                     continue;
-                } else if (m_num_clients == 0) {
-                    break;
                 }
                 m_cv.wait(lock);
             }
@@ -394,7 +400,7 @@ kj::Promise<void> ProxyServer<ThreadMap>::makeThread(MakeThreadContext context)
     const std::string from = context.getParams().getName();
     std::promise<ThreadContext*> thread_context;
     std::thread thread([&thread_context, from, this]() {
-        g_thread_context.thread_name = ThreadName(m_connection.m_loop.m_exe_name) + " (from " + from + ")";
+        g_thread_context.thread_name = ThreadName(m_connection.m_loop->m_exe_name) + " (from " + from + ")";
         g_thread_context.waiter = std::make_unique<Waiter>();
         thread_context.set_value(&g_thread_context);
         std::unique_lock<std::mutex> lock(g_thread_context.waiter->m_mutex);
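
For context, below is a minimal standalone sketch, separate from the diff above and not the actual libmultiprocess sources, of the RAII idea the diff applies: a small reference object that registers a client with an event loop on construction and deregisters it on reset() or destruction, notifying waiters when the last client goes away. The Loop and LoopRef names and members are illustrative only, and unlike the real EventLoopRef this sketch does not accept an already-held std::unique_lock, so it always locks the mutex itself.

#include <cassert>
#include <condition_variable>
#include <mutex>

// Illustrative stand-in for EventLoop: just the members this sketch needs.
struct Loop {
    std::mutex m_mutex;
    std::condition_variable m_cv;
    int m_num_clients{0};
};

// Illustrative stand-in for EventLoopRef: keeps the loop "busy" while alive.
class LoopRef {
public:
    explicit LoopRef(Loop& loop) : m_loop(&loop)
    {
        const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
        m_loop->m_num_clients += 1;
    }
    LoopRef(const LoopRef&) = delete;
    LoopRef& operator=(const LoopRef&) = delete;

    // Drop the reference; returns true if this was the last client, so a
    // caller can break out of its processing loop instead of continuing.
    bool reset()
    {
        bool done = false;
        if (m_loop) {
            const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
            assert(m_loop->m_num_clients > 0);
            m_loop->m_num_clients -= 1;
            done = (m_loop->m_num_clients == 0);
            if (done) m_loop->m_cv.notify_all();
            m_loop = nullptr;
        }
        return done;
    }
    ~LoopRef() { reset(); }

private:
    Loop* m_loop;
};

int main()
{
    Loop loop;
    {
        LoopRef ref{loop}; // registers a client, analogous to EventLoopRef ref(*this, &lock)
        // ... work that must keep the loop alive ...
    } // destructor deregisters and notifies waiters if this was the last client
    return loop.m_num_clients; // 0
}

The point mirrored from the diff is that a caller which needs to know whether it was the last client, like the async thread in startAsyncThread, calls ref.reset() explicitly and checks the return value rather than relying on the destructor running at end of scope.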