@@ -66,8 +66,6 @@ struct ProxyClient<Thread> : public ProxyClientBase<Thread, ::capnp::Void>
     ProxyClient(const ProxyClient&) = delete;
     ~ProxyClient();
 
-    void setDisconnectCallback(const std::function<void()>& fn);
-
     //! Reference to callback function that is run if there is a sudden
     //! disconnect and the Connection object is destroyed before this
     //! ProxyClient<Thread> object. The callback will destroy this object and
@@ -285,16 +283,16 @@ struct Waiter
     template <typename Fn>
     void post(Fn&& fn)
     {
-        const std::unique_lock<std::mutex> lock(m_mutex);
+        const Lock lock(m_mutex);
         assert(!m_fn);
         m_fn = std::forward<Fn>(fn);
         m_cv.notify_all();
     }
 
     template <class Predicate>
-    void wait(std::unique_lock<std::mutex>& lock, Predicate pred)
+    void wait(Lock& lock, Predicate pred)
     {
-        m_cv.wait(lock, [&] {
+        m_cv.wait(lock.m_lock, [&]() MP_REQUIRES(m_mutex) {
             // Important for this to be "while (m_fn)", not "if (m_fn)" to avoid
             // a lost-wakeup bug. A new m_fn and m_cv notification might be sent
             // after the fn() call and before the lock.lock() call in this loop
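
The lambda above is where posted work gets drained before the wait condition is re-checked. Below is a minimal standalone sketch of that drain-then-check pattern, using plain `std::mutex`/`std::unique_lock` and `std::function` in place of the annotated `Mutex`/`Lock` wrappers and `kj::Function`; it illustrates the technique and is not the library's actual implementation.

```cpp
#include <cassert>
#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <optional>
#include <thread>

struct MiniWaiter
{
    std::mutex m_mutex;
    std::condition_variable m_cv;
    std::optional<std::function<void()>> m_fn;

    void post(std::function<void()> fn)
    {
        const std::unique_lock<std::mutex> lock(m_mutex);
        assert(!m_fn);
        m_fn = std::move(fn);
        m_cv.notify_all();
    }

    template <class Predicate>
    void wait(std::unique_lock<std::mutex>& lock, Predicate pred)
    {
        m_cv.wait(lock, [&] {
            // "while", not "if": a new m_fn can be posted while fn() runs
            // below with the lock released, and it must be drained before
            // this predicate returns, or that notification could be lost.
            while (m_fn) {
                auto fn = std::move(*m_fn);
                m_fn.reset();
                lock.unlock(); // never run callbacks while holding m_mutex
                fn();
                lock.lock();
            }
            return pred();
        });
    }
};

int main()
{
    MiniWaiter waiter;
    bool done = false; // only touched by the waiting thread, inside fn()
    std::thread poster([&] {
        waiter.post([&] {
            std::cout << "posted fn ran on the waiting thread\n";
            done = true;
        });
    });
    std::unique_lock<std::mutex> lock(waiter.m_mutex);
    waiter.wait(lock, [&] { return done; });
    poster.join();
}
```

Unlocking inside the predicate is legal as long as the mutex is reacquired before the predicate returns, which is why the annotated version can hand the raw `lock.m_lock` to `std::condition_variable::wait()` and still execute callbacks unlocked.
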
@@ -317,9 +315,9 @@ struct Waiter
     //! mutexes than necessary. This mutex can be held at the same time as
     //! EventLoop::m_mutex as long as Waiter::mutex is locked first and
     //! EventLoop::m_mutex is locked second.
-    std::mutex m_mutex;
+    Mutex m_mutex;
     std::condition_variable m_cv;
-    std::optional<kj::Function<void()>> m_fn;
+    std::optional<kj::Function<void()>> m_fn MP_GUARDED_BY(m_mutex);
 };
 
 //! Object holding network & rpc state associated with either an incoming server
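
`Mutex`, `Lock`, `MP_GUARDED_BY`, and `MP_REQUIRES` above come from the project's thread-safety annotation layer. The sketch below shows the conventional shape of such wrappers under Clang's `-Wthread-safety` analysis; every definition in it is an illustrative assumption, not the library's actual header.

```cpp
#include <mutex>

#if defined(__clang__)
#define MP_CAPABILITY(x) __attribute__((capability(x)))
#define MP_SCOPED_CAPABILITY __attribute__((scoped_lockable))
#define MP_GUARDED_BY(x) __attribute__((guarded_by(x)))
#define MP_REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
#define MP_ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define MP_RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
#else
#define MP_CAPABILITY(x)
#define MP_SCOPED_CAPABILITY
#define MP_GUARDED_BY(x)
#define MP_REQUIRES(...)
#define MP_ACQUIRE(...)
#define MP_RELEASE(...)
#endif

// Mutex wrapper the analysis can track as a capability.
class MP_CAPABILITY("mutex") Mutex
{
public:
    void lock() MP_ACQUIRE() { m_mutex.lock(); }
    void unlock() MP_RELEASE() { m_mutex.unlock(); }
    std::mutex m_mutex;
};

// RAII lock exposing the underlying std::unique_lock as m_lock, so callers
// can hand it to std::condition_variable::wait(), which unlocks and relocks
// behind the analysis's back (hence the explicit MP_REQUIRES on the wait
// predicate in the hunk above).
class MP_SCOPED_CAPABILITY Lock
{
public:
    explicit Lock(Mutex& mutex) MP_ACQUIRE(mutex) : m_lock(mutex.m_mutex) {}
    ~Lock() MP_RELEASE() {}
    std::unique_lock<std::mutex> m_lock;
};

// Usage mirroring the members above: touching m_value requires m_mutex.
struct Example
{
    Mutex m_mutex;
    int m_value MP_GUARDED_BY(m_mutex) = 0;
    void bump() MP_REQUIRES(m_mutex) { ++m_value; }
};
```
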
@@ -544,29 +542,73 @@ void ProxyServerBase<Interface, Impl>::invokeDestroy()
     CleanupRun(m_context.cleanup_fns);
 }
 
-using ConnThreads = std::map<Connection*, ProxyClient<Thread>>;
+//! Map from Connection to local or remote thread handle which will be used over
+//! that connection. This map will typically only contain one entry, but can
+//! contain multiple if a single thread makes IPC calls over multiple
+//! connections. A std::optional value type is used to avoid the map needing to
+//! be locked while ProxyClient<Thread> objects are constructed, see
+//! ThreadContext "Synchronization note" below.
+using ConnThreads = std::map<Connection*, std::optional<ProxyClient<Thread>>>;
 using ConnThread = ConnThreads::iterator;
 
 // Retrieve ProxyClient<Thread> object associated with this connection from a
 // map, or create a new one and insert it into the map. Return map iterator and
 // inserted bool.
-std::tuple<ConnThread, bool> SetThread(ConnThreads& threads, std::mutex& mutex, Connection* connection, const std::function<Thread::Client()>& make_thread);
+std::tuple<ConnThread, bool> SetThread(GuardedRef<ConnThreads> threads, Connection* connection, const std::function<Thread::Client()>& make_thread);
 
+//! The thread_local ThreadContext g_thread_context struct provides information
+//! about individual threads and a way of communicating between them. Because
+//! it's a thread local struct, each ThreadContext instance is initialized by
+//! the thread that owns it.
+//!
+//! ThreadContext is used for any client threads created externally which make
+//! IPC calls, and for server threads created by
+//! ProxyServer<ThreadMap>::makeThread() which execute IPC calls for clients.
+//!
+//! In both cases, the struct holds information like the thread name, and a
+//! Waiter object where the EventLoop can post incoming IPC requests to execute
+//! on the thread. The struct also holds ConnThread maps associating the thread
+//! with local and remote ProxyClient<Thread> objects.
 struct ThreadContext
 {
     //! Identifying string for debug.
     std::string thread_name;
 
-    //! Waiter object used to allow client threads blocked waiting for a server
-    //! response to execute callbacks made from the client's corresponding
-    //! server thread.
+    //! Waiter object used to allow remote clients to execute code on this
+    //! thread. For server threads created by
+    //! ProxyServer<ThreadMap>::makeThread(), this is initialized in that
+    //! function. Otherwise, for client threads created externally, this is
+    //! initialized the first time the thread tries to make an IPC call. Having
+    //! a waiter is necessary for threads making IPC calls in case a server they
+    //! are calling expects them to execute a callback during the call, before
+    //! it sends a response.
+    //!
+    //! For IPC client threads, the Waiter pointer is never cleared and the Waiter
+    //! just gets destroyed when the thread does. For server threads created by
+    //! makeThread(), this pointer is set to null in the ~ProxyServer<Thread> as
+    //! a signal for the thread to exit and destroy itself. In both cases, the
+    //! same Waiter object is used across different calls and only created and
+    //! destroyed once for the lifetime of the thread.
     std::unique_ptr<Waiter> waiter = nullptr;
 
     //! When client is making a request to a server, this is the
     //! `callbackThread` argument it passes in the request, used by the server
     //! in case it needs to make callbacks into the client that need to execute
     //! while the client is waiting. This will be set to a local thread object.
-    ConnThreads callback_threads;
+    //!
+    //! Synchronization note: The callback_threads and request_threads maps are
+    //! only ever accessed internally by this thread's destructor and externally
+    //! by Cap'n Proto event loop threads. Since it's possible for IPC client
+    //! threads to make calls over different connections that could have
+    //! different event loops, these maps are guarded by Waiter::m_mutex in case
+    //! different event loop threads add or remove map entries simultaneously.
+    //! However, individual ProxyClient<Thread> objects in the maps will only be
+    //! associated with one event loop and guarded by EventLoop::m_mutex. So
+    //! Waiter::m_mutex does not need to be held while accessing individual
+    //! ProxyClient<Thread> instances, and may even need to be released to
+    //! respect lock order and avoid locking Waiter::m_mutex before
+    //! EventLoop::m_mutex.
+    ConnThreads callback_threads MP_GUARDED_BY(waiter->m_mutex);
 
     //! When client is making a request to a server, this is the `thread`
     //! argument it passes in the request, used to control which thread on
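
The `std::optional` value type is what allows `SetThread` to reserve a map slot while holding the lock and run the `ProxyClient<Thread>` constructor after releasing it. Below is a self-contained sketch of that two-phase pattern; `ThreadHandle`, `SetThreadSketch`, and the bare global `std::mutex` (standing in for `Waiter::m_mutex`) are simplified stand-ins, not the library's actual `SetThread` or `GuardedRef`.

```cpp
#include <functional>
#include <map>
#include <mutex>
#include <optional>
#include <tuple>

struct Connection {};                // stand-in for the real Connection
struct ThreadHandle                  // stand-in for ProxyClient<Thread>
{
    explicit ThreadHandle(const std::function<void()>& make) { make(); }
};
using ConnThreads = std::map<Connection*, std::optional<ThreadHandle>>;
using ConnThread = ConnThreads::iterator;

std::mutex g_threads_mutex; // stands in for Waiter::m_mutex

std::tuple<ConnThread, bool> SetThreadSketch(ConnThreads& threads, Connection* connection,
                                             const std::function<void()>& make_thread)
{
    ConnThread it;
    bool inserted;
    {
        // Phase 1: reserve a slot under the lock. try_emplace with no value
        // arguments default-constructs the std::optional as nullopt, so no
        // ThreadHandle constructor runs while the map is locked.
        const std::lock_guard<std::mutex> lock(g_threads_mutex);
        std::tie(it, inserted) = threads.try_emplace(connection);
    }
    if (inserted) {
        // Phase 2: construct the handle with the map lock released, so the
        // constructor is free to take other locks (e.g. EventLoop::m_mutex)
        // without lock-order concerns. std::map iterators stay valid across
        // later inserts, and this entry is only filled in by the thread that
        // won the try_emplace race.
        it->second.emplace(make_thread);
    }
    return {it, inserted};
}

int main()
{
    ConnThreads threads;
    Connection conn;
    auto [it, inserted] = SetThreadSketch(threads, &conn, [] { /* make IPC thread handle */ });
    return it != threads.end() && inserted ? 0 : 1;
}
```
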
@@ -575,7 +617,9 @@ struct ThreadContext
     //! by makeThread. If a client call is being made from a thread currently
     //! handling a server request, this will be set to the `callbackThread`
     //! request thread argument passed in that request.
-    ConnThreads request_threads;
+    //!
+    //! Synchronization note: \ref callback_threads note applies here as well.
+    ConnThreads request_threads MP_GUARDED_BY(waiter->m_mutex);
 
     //! Whether this thread is a capnp event loop thread. Not really used except
     //! to assert false if there's an attempt to execute a blocking operation