Add a method to avoid re-persisting monitors during startup #3996

Open · wants to merge 3 commits into main · Changes from 1 commit

37 changes: 37 additions & 0 deletions lightning/src/chain/chainmonitor.rs
@@ -825,6 +825,43 @@ where

self.pending_send_only_events.lock().unwrap().push(send_peer_storage_event)
}

/// Loads a [`ChannelMonitor`] which already exists on disk after startup.
///
/// Using this over [`chain::Watch::watch_channel`] avoids re-persisting a [`ChannelMonitor`]
/// that hasn't changed, which would otherwise slow down startup.
///
/// Note that this method *can* be used even if additional blocks were replayed against the
/// [`ChannelMonitor`]; it can only *not* be used if a [`ChannelMonitorUpdate`] was replayed
/// against the [`ChannelMonitor`] which needs to be persisted (i.e., the state has changed
/// due to a [`ChannelMonitorUpdate`] such that it may differ after another restart).
///
/// This method is only safe for [`ChannelMonitor`]s which have been loaded (in conjunction
/// with a `ChannelManager`) at least once by LDK 0.1 or later.
pub fn load_post_0_1_existing_monitor(
&self, channel_id: ChannelId, monitor: ChannelMonitor<ChannelSigner>,
) -> Result<(), ()> {
let logger = WithChannelMonitor::from(&self.logger, &monitor, None);
let mut monitors = self.monitors.write().unwrap();
let entry = match monitors.entry(channel_id) {
hash_map::Entry::Occupied(_) => {
log_error!(logger, "Failed to add new channel data: channel monitor for given channel ID is already present");
return Err(());
},
hash_map::Entry::Vacant(e) => e,
};
log_trace!(
logger,
"Loaded existing ChannelMonitor for channel {}",
log_funding_info!(monitor)
);
if let Some(ref chain_source) = self.chain_source {
monitor.load_outputs_to_watch(chain_source, &self.logger);
}
entry.insert(MonitorHolder { monitor, pending_monitor_updates: Mutex::new(Vec::new()) });
Ok(())
}
}

impl<
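For context, a minimal sketch of how a restarting node might call the new method, assuming `chain_monitor` is an initialized `ChainMonitor` and `monitors_read` is a hypothetical `Vec<(ChannelId, ChannelMonitor<_>)>` deserialized from storage:

```rust
// Hypothetical startup flow: these monitors were last persisted by LDK 0.1+ and no
// ChannelMonitorUpdates were replayed against them, so the re-persist performed by
// `watch_channel` can safely be skipped.
for (channel_id, monitor) in monitors_read {
    chain_monitor
        .load_post_0_1_existing_monitor(channel_id, monitor)
        .expect("no ChannelMonitor should already be registered for this channel ID");
}
```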
5 changes: 5 additions & 0 deletions lightning/src/ln/channelmanager.rs
@@ -15396,9 +15396,13 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
/// This is important if you have replayed a nontrivial number of blocks in step (4), allowing
/// you to avoid having to replay the same blocks if you shut down quickly after startup. It is
/// otherwise not required.
///
/// Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you
/// will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in
/// the next step.
///
/// If you wish to avoid this for performance reasons, use
/// [`ChainMonitor::load_post_0_1_existing_monitor`].
/// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a
/// [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`].
///
@@ -15413,6 +15417,7 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
/// which you've already broadcasted the transaction.
///
/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
/// [`ChainMonitor::load_post_0_1_existing_monitor`]: crate::chain::chainmonitor::ChainMonitor::load_post_0_1_existing_monitor
pub struct ChannelManagerReadArgs<
'a,
M: Deref,
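As a hedged illustration of step (7) in the doc comment above: a monitor that was changed by a replayed [`ChannelMonitorUpdate`] must go through `watch_channel` (which re-persists it), while an unchanged monitor can take the cheaper path. The `monitor_was_updated` flag and the `monitors_read` collection below are hypothetical:

```rust
// Sketch only: route each deserialized monitor to the appropriate loading path.
for (channel_id, monitor, monitor_was_updated) in monitors_read {
    if monitor_was_updated {
        // State changed since the last persist, so the monitor must be re-persisted.
        assert_eq!(
            chain_monitor.watch_channel(channel_id, monitor),
            Ok(ChannelMonitorUpdateStatus::Completed)
        );
    } else {
        // Unchanged on disk; skip the redundant persist.
        chain_monitor.load_post_0_1_existing_monitor(channel_id, monitor).unwrap();
    }
}
```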
5 changes: 1 addition & 4 deletions lightning/src/ln/functional_test_utils.rs
@@ -1323,10 +1323,7 @@ pub fn _reload_node<'a, 'b, 'c>(

for monitor in monitors_read.drain(..) {
let channel_id = monitor.channel_id();
- assert_eq!(
-     node.chain_monitor.watch_channel(channel_id, monitor),
-     Ok(ChannelMonitorUpdateStatus::Completed)
- );
+ assert_eq!(node.chain_monitor.load_post_0_1_existing_monitor(channel_id, monitor), Ok(()));
check_added_monitors!(node, 1);
}

27 changes: 27 additions & 0 deletions lightning/src/util/test_utils.rs
@@ -515,6 +515,33 @@ impl<'a> TestChainMonitor<'a> {
self.latest_monitor_update_id.lock().unwrap().get(channel_id).unwrap().clone();
self.chain_monitor.channel_monitor_updated(*channel_id, latest_update).unwrap();
}

pub fn load_post_0_1_existing_monitor(
&self, channel_id: ChannelId, monitor: ChannelMonitor<TestChannelSigner>,
) -> Result<(), ()> {
#[cfg(feature = "std")]
if let Some(blocker) = &*self.write_blocker.lock().unwrap() {
blocker.recv().unwrap();
}

// At every point where we get a monitor update, we should be able to send a useful monitor
// to a watchtower and disk...
let mut w = TestVecWriter(Vec::new());
monitor.write(&mut w).unwrap();
let new_monitor = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
&mut io::Cursor::new(&w.0),
(self.keys_manager, self.keys_manager),
)
.unwrap()
.1;
assert!(new_monitor == monitor);
self.latest_monitor_update_id
.lock()
.unwrap()
.insert(channel_id, (monitor.get_latest_update_id(), monitor.get_latest_update_id()));
self.added_monitors.lock().unwrap().push((channel_id, monitor));
self.chain_monitor.load_post_0_1_existing_monitor(channel_id, new_monitor)
}
}
impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
fn watch_channel(