From 9d11042772a2957fe0deaf1955ec33e318085817 Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Tue, 8 Oct 2024 16:01:16 -0400 Subject: [PATCH 1/2] bugfix: prevent double delete segfault in server if client disconnects during method call Fix issue where if a client disconnects in the middle of a long running IPC method call, the server will segfault when the method eventually returns. This behavior was reported by tdb3 in https://github.com/bitcoin/bitcoin/pull/31003#pullrequestreview-2349619672 including a stack trace showing where the crash happens in the PassField mp.Context overload while calling request_threads.erase. --- include/mp/proxy-types.h | 15 ++++++++++++--- src/mp/proxy.cpp | 5 +++++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/include/mp/proxy-types.h b/include/mp/proxy-types.h index c951d07f..793e583b 100644 --- a/include/mp/proxy-types.h +++ b/include/mp/proxy-types.h @@ -152,10 +152,19 @@ auto PassField(Priority<1>, TypeList<>, ServerContext& server_context, const Fn& // inserted, one already existed, meaning this must be a // recursive call (IPC call calling back to the caller which // makes another IPC call), so avoid modifying the map. - auto erase_thread{inserted ? request_thread : request_threads.end()}; - KJ_DEFER(if (erase_thread != request_threads.end()) { + const bool erase_thread{inserted}; + KJ_DEFER(if (erase_thread) { std::unique_lock lock(thread_context.waiter->m_mutex); - request_threads.erase(erase_thread); + // Call erase here with a Connection* argument instead + // of an iterator argument, because the `request_thread` + // iterator may be invalid if the connection is closed + // during this function call. More specifically, the + // iterator may be invalid because SetThread adds a + // cleanup callback to the Connection destructor that + // erases the thread from the map, and also because the + // ProxyServer destructor calls + // request_threads.clear(). 
+ request_threads.erase(server.m_context.connection); }); fn.invoke(server_context, args...); } diff --git a/src/mp/proxy.cpp b/src/mp/proxy.cpp index 97dd3838..bcfbde8b 100644 --- a/src/mp/proxy.cpp +++ b/src/mp/proxy.cpp @@ -277,6 +277,11 @@ std::tuple SetThread(ConnThreads& threads, std::mutex& mutex, std::piecewise_construct, std::forward_as_tuple(connection), std::forward_as_tuple(make_thread(), connection, /* destroy_connection= */ false)).first; thread->second.setCleanup([&threads, &mutex, thread] { + // Note: it is safe to use the `thread` iterator in this cleanup + // function, because the iterator would only be invalid if the map entry + // was removed, and if the map entry is removed the ProxyClient + // destructor unregisters the cleanup. + // Connection is being destroyed before thread client is, so reset // thread client m_cleanup member so thread client destructor does not // try unregister this callback after connection is destroyed. From 196e6fcdbc1f4d7d03f4fd05b9221917c44f5807 Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Wed, 16 Oct 2024 11:23:12 -0400 Subject: [PATCH 2/2] bugfix: prevent null pointer dereference in server if client disconnects during method call This is an imperfect workaround for a crash that can be triggered if a client disconnects in the middle of a long running IPC call. The workaround results in a memory leak but avoids a crash. This can be improved later, and more details are in a code comment. 
--- include/mp/proxy-types.h | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/include/mp/proxy-types.h b/include/mp/proxy-types.h index 793e583b..e3b1b4b0 100644 --- a/include/mp/proxy-types.h +++ b/include/mp/proxy-types.h @@ -122,6 +122,7 @@ auto PassField(Priority<1>, TypeList<>, ServerContext& server_context, const Fn& const auto& params = call_context.getParams(); Context::Reader context_arg = Accessor::get(params); ServerContext server_context{server, call_context, req}; + bool disconnected{false}; { // Before invoking the function, store a reference to the // callbackThread provided by the client in the @@ -153,7 +154,7 @@ auto PassField(Priority<1>, TypeList<>, ServerContext& server_context, const Fn& // recursive call (IPC call calling back to the caller which // makes another IPC call), so avoid modifying the map. const bool erase_thread{inserted}; - KJ_DEFER(if (erase_thread) { + KJ_DEFER({ std::unique_lock lock(thread_context.waiter->m_mutex); // Call erase here with a Connection* argument instead // of an iterator argument, because the `request_thread` @@ -164,10 +165,24 @@ auto PassField(Priority<1>, TypeList<>, ServerContext& server_context, const Fn& // erases the thread from the map, and also because the // ProxyServer destructor calls // request_threads.clear(). - request_threads.erase(server.m_context.connection); + if (erase_thread) { + disconnected = !request_threads.erase(server.m_context.connection); + } else { + disconnected = !request_threads.count(server.m_context.connection); + } }); fn.invoke(server_context, args...); } + if (disconnected) { + // If disconnected is true, the Connection object was + // destroyed during the method call. Deal with this by + // returning without ever fulfilling the promise, which will + // cause the ProxyServer object to leak. 
This is not ideal, + // but fixing the leak will require nontrivial code changes + // because there is a lot of code assuming ProxyServer + // objects are destroyed before Connection objects. + return; + } KJ_IF_MAYBE(exception, kj::runCatchingExceptions([&]() { server.m_context.connection->m_loop.sync([&] { auto fulfiller_dispose = kj::mv(fulfiller);