diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 0de372813b3..47d4d844434 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -825,6 +825,53 @@ where
 		self.pending_send_only_events.lock().unwrap().push(send_peer_storage_event)
 	}
+
+	/// Loads a [`ChannelMonitor`] which already exists on disk after startup.
+	///
+	/// Using this over [`chain::Watch::watch_channel`] avoids re-persisting a [`ChannelMonitor`]
+	/// that hasn't changed, slowing down startup.
+	///
+	/// Note that this method *can* be used if additional blocks were replayed against the
+	/// [`ChannelMonitor`], and in general can only *not* be used if a [`ChannelMonitorUpdate`] was
+	/// replayed against the [`ChannelMonitor`] which needs to be persisted (i.e. the state has
+	/// changed due to a [`ChannelMonitorUpdate`] such that it may be different after another
+	/// restart).
+	///
+	/// For [`ChannelMonitor`]s which were last serialized by an LDK version prior to 0.1 this will
+	/// fall back to calling [`chain::Watch::watch_channel`] and persisting the [`ChannelMonitor`].
+	/// See the release notes for LDK 0.1 for more information on this requirement.
+	///
+	/// [`ChannelMonitor`]s which do not need to be persisted (i.e. were last written by LDK 0.1 or
+	/// later) will be loaded without persistence and this method will return
+	/// [`ChannelMonitorUpdateStatus::Completed`].
+	pub fn load_existing_monitor(
+		&self, channel_id: ChannelId, monitor: ChannelMonitor<ChannelSigner>,
+	) -> Result<ChannelMonitorUpdateStatus, ()> {
+		if !monitor.written_by_0_1_or_later() {
+			return chain::Watch::watch_channel(self, channel_id, monitor);
+		}
+
+		let logger = WithChannelMonitor::from(&self.logger, &monitor, None);
+		let mut monitors = self.monitors.write().unwrap();
+		let entry = match monitors.entry(channel_id) {
+			hash_map::Entry::Occupied(_) => {
+				log_error!(logger, "Failed to add new channel data: channel monitor for given channel ID is already present");
+				return Err(());
+			},
+			hash_map::Entry::Vacant(e) => e,
+		};
+		log_trace!(
+			logger,
+			"Loaded existing ChannelMonitor for channel {}",
+			log_funding_info!(monitor)
+		);
+		if let Some(ref chain_source) = self.chain_source {
+			monitor.load_outputs_to_watch(chain_source, &self.logger);
+		}
+		entry.insert(MonitorHolder { monitor, pending_monitor_updates: Mutex::new(Vec::new()) });
+
+		Ok(ChannelMonitorUpdateStatus::Completed)
+	}
 }
 
 impl<
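Mirroring the `_reload_node` change further down, a caller's startup path might use the new method as in the following minimal sketch. Here `monitor_bytes` (the serialized monitor blobs read back from storage), `keys_manager` (a `KeysManager`), and `chain_monitor` are assumed to exist in the surrounding startup code; this illustrates the API added above, it is not code from this PR.

```rust
use bitcoin::hash_types::BlockHash;
use lightning::chain::channelmonitor::ChannelMonitor;
use lightning::chain::ChannelMonitorUpdateStatus;
use lightning::io;
use lightning::sign::InMemorySigner;
use lightning::util::ser::ReadableArgs;

for serialized in monitor_bytes {
	// Deserialize each monitor exactly as before...
	let (_best_block, monitor) = <(BlockHash, ChannelMonitor<InMemorySigner>)>::read(
		&mut io::Cursor::new(&serialized[..]),
		(&keys_manager, &keys_manager),
	)
	.expect("failed to read ChannelMonitor");
	let channel_id = monitor.channel_id();
	// ...but load it without forcing a redundant re-persist. Monitors last
	// written before LDK 0.1 silently fall back to watch_channel here.
	let status = chain_monitor
		.load_existing_monitor(channel_id, monitor)
		.expect("duplicate ChannelMonitor for a channel ID");
	// With a synchronous persister this is Completed; a pre-0.1 monitor that
	// fell back to watch_channel with an async persister may be InProgress.
	debug_assert_eq!(status, ChannelMonitorUpdateStatus::Completed);
}
```

Because of the pre-0.1 fallback, callers can use this unconditionally: they never need to check the serialization vintage themselves.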
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index c7011af43a9..e22b04570ce 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -1300,6 +1300,11 @@ pub(crate) struct ChannelMonitorImpl<Signer: EcdsaChannelSigner> {
 	// found at `Self::funding`. We don't use the term "renegotiated", as the currently locked
 	// `FundingScope` could be one that was renegotiated.
 	alternative_funding_confirmed: Option<(Txid, u32)>,
+
+	/// [`ChannelMonitor`]s written by LDK prior to 0.1 need to be re-persisted after startup. To
+	/// make deciding whether to do so simple, here we track whether this monitor was last written
+	/// prior to 0.1.
+	written_by_0_1_or_later: bool,
 }
 
 // Macro helper to access holder commitment HTLC data (including both non-dust and dust) while
@@ -1803,6 +1808,8 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 			prev_holder_htlc_data: None,
 			alternative_funding_confirmed: None,
+
+			written_by_0_1_or_later: true,
 		})
 	}
@@ -1936,6 +1943,10 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 		self.inner.lock().unwrap().get_funding_txo()
 	}
 
+	pub(crate) fn written_by_0_1_or_later(&self) -> bool {
+		self.inner.lock().unwrap().written_by_0_1_or_later
+	}
+
 	/// Gets the funding script of the channel this ChannelMonitor is monitoring for.
 	pub fn get_funding_script(&self) -> ScriptBuf {
 		self.inner.lock().unwrap().get_funding_script()
@@ -6080,6 +6091,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)>
 			(32, pending_funding, optional_vec),
 			(34, alternative_funding_confirmed, option),
 		});
+		let written_by_0_1_or_later = payment_preimages_with_info.is_some();
 		if let Some(payment_preimages_with_info) = payment_preimages_with_info {
 			if payment_preimages_with_info.len() != payment_preimages.len() {
 				return Err(DecodeError::InvalidValue);
@@ -6250,6 +6262,8 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)>
 			prev_holder_htlc_data,
 			alternative_funding_confirmed,
+
+			written_by_0_1_or_later,
 		})))
 	}
 }
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 1fd99f89451..90790479976 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -15396,9 +15396,13 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
 ///    This is important if you have replayed a nontrivial number of blocks in step (4), allowing
 ///    you to avoid having to replay the same blocks if you shut down quickly after startup. It is
 ///    otherwise not required.
+///
 ///    Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you
 ///    will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in
 ///    the next step.
+///
+///    If you wish to avoid this for performance reasons, use
+///    [`ChainMonitor::load_existing_monitor`].
 /// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a
 ///    [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`].
@@ -15413,6 +15417,7 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
 /// which you've already broadcasted the transaction.
 ///
 /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
+/// [`ChainMonitor::load_existing_monitor`]: crate::chain::chainmonitor::ChainMonitor::load_existing_monitor
 pub struct ChannelManagerReadArgs<
 	'a,
 	M: Deref,
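In the deserialization hunk above, `written_by_0_1_or_later` is derived purely from whether the optional `payment_preimages_with_info` field was present, since every LDK 0.1+ writer emits it. A standalone sketch of that presence-as-version-marker pattern follows; the names are illustrative, and the real code uses LDK's TLV stream macros rather than plain arguments.

```rust
/// Sketch of the decoded state, assuming only the two fields relevant here.
struct Decoded {
	payment_preimages: Vec<[u8; 32]>,
	/// True iff the blob was produced by a writer new enough to include the
	/// richer preimage field (LDK 0.1 or later in the real code).
	written_by_0_1_or_later: bool,
}

fn decode(
	legacy_preimages: Vec<[u8; 32]>,
	preimages_with_info: Option<Vec<([u8; 32], u64)>>,
) -> Result<Decoded, ()> {
	// A field that newer writers always emit doubles as a version marker:
	// its mere presence tells us who wrote the blob.
	let written_by_0_1_or_later = preimages_with_info.is_some();
	if let Some(with_info) = &preimages_with_info {
		// A newer writer keeps the new field consistent with the legacy one.
		if with_info.len() != legacy_preimages.len() {
			return Err(());
		}
	}
	Ok(Decoded { payment_preimages: legacy_preimages, written_by_0_1_or_later })
}
```

Reusing an already-written field as the marker means no dedicated version byte is needed, and old blobs are recognized simply by its absence.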
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 00e883e8561..955209ea21f 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -1324,8 +1324,8 @@ pub fn _reload_node<'a, 'b, 'c>(
 	for monitor in monitors_read.drain(..) {
 		let channel_id = monitor.channel_id();
 		assert_eq!(
-			node.chain_monitor.watch_channel(channel_id, monitor),
-			Ok(ChannelMonitorUpdateStatus::Completed)
+			node.chain_monitor.load_existing_monitor(channel_id, monitor),
+			Ok(ChannelMonitorUpdateStatus::Completed),
 		);
 		check_added_monitors!(node, 1);
 	}
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index e5388b76049..ca4f8a4f1be 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -515,6 +515,35 @@ impl<'a> TestChainMonitor<'a> {
 			self.latest_monitor_update_id.lock().unwrap().get(channel_id).unwrap().clone();
 		self.chain_monitor.channel_monitor_updated(*channel_id, latest_update).unwrap();
 	}
+
+	pub fn load_existing_monitor(
+		&self, channel_id: ChannelId, monitor: ChannelMonitor<TestChannelSigner>,
+	) -> Result<ChannelMonitorUpdateStatus, ()> {
+		#[cfg(feature = "std")]
+		if let Some(blocker) = &*self.write_blocker.lock().unwrap() {
+			blocker.recv().unwrap();
+		}
+
+		// At every point where we get a monitor update, we should be able to send a useful monitor
+		// to a watchtower and disk...
+		let mut w = TestVecWriter(Vec::new());
+		monitor.write(&mut w).unwrap();
+		let new_monitor = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
+			&mut io::Cursor::new(&w.0),
+			(self.keys_manager, self.keys_manager),
+		)
+		.unwrap()
+		.1;
+		// Note that a ChannelMonitor might not round-trip exactly here as we have tests that were
+		// serialized prior to LDK 0.1 and re-serializing them will flip the "written after LDK
+		// 0.1" flag.
+		self.latest_monitor_update_id
+			.lock()
+			.unwrap()
+			.insert(channel_id, (monitor.get_latest_update_id(), monitor.get_latest_update_id()));
+		self.added_monitors.lock().unwrap().push((channel_id, monitor));
+		self.chain_monitor.load_existing_monitor(channel_id, new_monitor)
+	}
 }
 
 impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
 	fn watch_channel(
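The round-trip comment in the test helper above is also why the usual serialize-then-compare equality check is not carried over from the `watch_channel` test path: serializing always produces a current-format blob, so a monitor originally written before 0.1 no longer compares equal to its re-read copy. A toy model of that asymmetry, with standalone types rather than LDK's:

```rust
/// Toy stand-in for a monitor; only the version flag matters here.
#[derive(Clone, PartialEq, Debug)]
struct ToyMonitor {
	written_by_0_1_or_later: bool,
}

fn write(_monitor: &ToyMonitor) -> Vec<u8> {
	// The current writer always emits the 0.1+ marker field, regardless of
	// what the monitor it serializes was originally read from.
	vec![1]
}

fn read(blob: &[u8]) -> ToyMonitor {
	ToyMonitor { written_by_0_1_or_later: blob.first() == Some(&1u8) }
}

fn main() {
	let pre_0_1 = ToyMonitor { written_by_0_1_or_later: false };
	let round_tripped = read(&write(&pre_0_1));
	// The flag flips on re-serialization, so a strict equality assert would
	// spuriously fail for monitors originally written before 0.1.
	assert_ne!(pre_0_1, round_tripped);
	assert!(round_tripped.written_by_0_1_or_later);
}
```

The test helper therefore only checks that the monitor deserializes and loads cleanly, not that it round-trips byte-for-byte.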