@@ -2051,11 +2051,11 @@ macro_rules! handle_new_monitor_update {
 			ChannelMonitorUpdateStatus::InProgress => {
 				log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
 					&$chan.context.channel_id());
-				Ok(false)
+				false
 			},
 			ChannelMonitorUpdateStatus::Completed => {
 				$completed;
-				Ok(true)
+				true
 			},
 		}
 	} };
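The `_internal` arm above now evaluates to a bare `bool` ("did the update complete durably?") instead of `Ok(bool)`, since callers no longer have an error case to propagate. A minimal sketch of the new contract, using a simplified stand-in for `ChannelMonitorUpdateStatus` rather than LDK's real types:

```rust
// Illustrative sketch only: models the macro's new bool contract.
#[derive(Debug)]
enum MonitorUpdateStatus {
	InProgress,
	Completed,
}

/// Returns true when the update was applied durably; false means it is still
/// in flight and outbound messages must be held until it completes.
fn handle_update_status(status: MonitorUpdateStatus) -> bool {
	match status {
		MonitorUpdateStatus::InProgress => {
			println!("update in flight, holding messages");
			false
		},
		MonitorUpdateStatus::Completed => {
			println!("update completed");
			true
		},
	}
}

fn main() {
	assert!(!handle_update_status(MonitorUpdateStatus::InProgress));
	assert!(handle_update_status(MonitorUpdateStatus::Completed));
}
```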
@@ -2070,14 +2070,9 @@ macro_rules! handle_new_monitor_update {
 				$per_peer_state_lock, chan, MANUALLY_REMOVING_INITIAL_MONITOR, { $chan_entry.remove() })
 		} else {
 			// We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
-			// update).
-			debug_assert!(false);
-			let channel_id = *$chan_entry.key();
-			let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
-				"Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
-				$chan_entry.get_mut(), &channel_id);
-			$chan_entry.remove();
-			Err(err)
+			// update). Throwing away a monitor update could be dangerous, so we assert even in
+			// release builds.
+			panic!("Initial Monitors should not exist for non-funded channels");
 		}
 	};
20832078 ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
@@ -2107,14 +2102,9 @@ macro_rules! handle_new_monitor_update {
 				$per_peer_state_lock, chan, MANUALLY_REMOVING, { $chan_entry.remove() })
 		} else {
 			// We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
-			// update).
-			debug_assert!(false);
-			let channel_id = *$chan_entry.key();
-			let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
-				"Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
-				$chan_entry.get_mut(), &channel_id);
-			$chan_entry.remove();
-			Err(err)
+			// update). Throwing away a monitor update could be dangerous, so we assert even in
+			// release builds.
+			panic!("Monitor updates should not exist for non-funded channels");
 		}
 	}
 }
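Both unfunded-channel branches now fail loudly instead of building a close error. The key difference: `debug_assert!(false)` compiles to a no-op under `--release`, so the old code could silently discard a monitor update in production, while `panic!` fires in every build. A toy illustration with simplified stand-in types (not LDK's):

```rust
// Sketch: why the unfunded branch panics rather than debug-asserting.
enum ChannelPhase {
	Funded,   // has a ChannelMonitor; updates are expected
	Unfunded, // no monitor exists yet, so an update here is a logic bug
}

fn apply_monitor_update(phase: &ChannelPhase) {
	match phase {
		ChannelPhase::Funded => {
			// ... hand the update to the channel's monitor ...
		},
		// Throwing away a monitor update could lose channel state, so fail
		// in all builds instead of compiling the check out in release.
		ChannelPhase::Unfunded =>
			panic!("Monitor updates should not exist for non-funded channels"),
	}
}

fn main() {
	apply_monitor_update(&ChannelPhase::Funded);
}
```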
@@ -2529,7 +2519,7 @@ where
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
 		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
-		let result: Result<(), _> = loop {
+		loop {
 			{
 				let per_peer_state = self.per_peer_state.read().unwrap();
 
@@ -2558,8 +2548,9 @@ where
 
 						// Update the monitor with the shutdown script if necessary.
 						if let Some(monitor_update) = monitor_update_opt.take() {
-							break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-								peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+							handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+								peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
+							break;
 						}
 
 						if chan.is_shutdown() {
@@ -2572,7 +2563,7 @@ where
 								self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
 							}
 						}
-						break Ok(());
+						break;
 					}
 				},
 				hash_map::Entry::Vacant(_) => (),
@@ -2591,7 +2582,6 @@ where
 			self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
 		}
 
-		let _ = handle_error!(self, result, *counterparty_node_id);
 		Ok(())
 	}
 
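With the monitor-update call no longer producing a `Result`, the surrounding `loop` in `close_channel_internal` stops being a value-producing expression and the trailing `handle_error!` call disappears. A small self-contained sketch contrasting the two loop shapes:

```rust
// Before: the loop was an expression and every `break` carried a Result that
// was passed to error handling after the loop.
fn old_style() -> Result<(), &'static str> {
	let result: Result<(), &'static str> = loop {
		break Ok(());
	};
	result
}

// After: the loop exists only for control flow, so a bare `break` suffices.
fn new_style() {
	loop {
		break;
	}
}

fn main() {
	assert!(old_style().is_ok());
	new_style();
}
```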
@@ -3334,8 +3324,7 @@ where
 					match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
 						Some(monitor_update) => {
 							match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan_phase_entry) {
-								Err(e) => break Err(e),
-								Ok(false) => {
+								false => {
 									// Note that MonitorUpdateInProgress here indicates (per function
 									// docs) that we will resend the commitment update once monitor
 									// updating completes. Therefore, we must return an error
@@ -3344,7 +3333,7 @@ where
 									// MonitorUpdateInProgress, below.
 									return Err(APIError::MonitorUpdateInProgress);
 								},
-								Ok(true) => {},
+								true => {},
 							}
 						},
 						None => {},
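Send-path callers now match directly on the macro's `bool`: `false` is not a failure, it just means the `ChannelMonitorUpdate` is still in flight and the commitment update will be resent once it completes. A sketch of that control flow with a hypothetical `send_htlc` and a stand-in error enum (not LDK's `APIError`):

```rust
#[derive(Debug, PartialEq)]
enum ApiError {
	MonitorUpdateInProgress,
}

fn send_htlc(update_completed: bool) -> Result<(), ApiError> {
	match update_completed {
		false => {
			// The monitor update is pending; per the macro's contract the
			// commitment update is resent when it completes, so the caller
			// only learns that it must wait.
			return Err(ApiError::MonitorUpdateInProgress);
		},
		true => {},
	}
	Ok(())
}

fn main() {
	assert_eq!(send_htlc(false), Err(ApiError::MonitorUpdateInProgress));
	assert!(send_htlc(true).is_ok());
}
```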
@@ -4526,7 +4515,7 @@ where
 				},
 				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
 					let mut updated_chan = false;
-					let res = {
+					{
 						let per_peer_state = self.per_peer_state.read().unwrap();
 						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
 							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -4535,24 +4524,16 @@ where
 								hash_map::Entry::Occupied(mut chan_phase) => {
 									updated_chan = true;
 									handle_new_monitor_update!(self, funding_txo, update.clone(),
-										peer_state_lock, peer_state, per_peer_state, chan_phase).map(|_| ())
+										peer_state_lock, peer_state, per_peer_state, chan_phase);
 								},
-								hash_map::Entry::Vacant(_) => Ok(()),
+								hash_map::Entry::Vacant(_) => {},
 							}
-						} else { Ok(()) }
-					};
+						}
+					}
 					if !updated_chan {
 						// TODO: Track this as in-flight even though the channel is closed.
 						let _ = self.chain_monitor.update_channel(funding_txo, &update);
 					}
-					// TODO: If this channel has since closed, we're likely providing a payment
-					// preimage update, which we must ensure is durable! We currently don't,
-					// however, ensure that.
-					if res.is_err() {
-						log_error!(self.logger,
-							"Failed to provide ChannelMonitorUpdate to closed channel! This likely lost us a payment preimage!");
-					}
-					let _ = handle_error!(self, res, counterparty_node_id);
 				},
 				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
 					let per_peer_state = self.per_peer_state.read().unwrap();
@@ -5275,15 +5256,8 @@ where
 					peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
 				}
 				if !during_init {
-					let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+					handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
 						peer_state, per_peer_state, chan_phase_entry);
-					if let Err(e) = res {
-						// TODO: This is a *critical* error - we probably updated the outbound edge
-						// of the HTLC's monitor with a preimage. We should retry this monitor
-						// update over and over again until morale improves.
-						log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
-						return Err((counterparty_node_id, e));
-					}
 				} else {
 					// If we're running during init we cannot update a monitor directly -
 					// they probably haven't actually been loaded yet. Instead, push the
@@ -5934,24 +5908,13 @@ where
 						});
 
 						if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
-							let mut res = handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
+							handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
 								per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
 								{ peer_state.channel_by_id.remove(&new_channel_id) });
-
-							// Note that we reply with the new channel_id in error messages if we gave up on the
-							// channel, not the temporary_channel_id. This is compatible with ourselves, but the
-							// spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
-							// any messages referencing a previously-closed channel anyway.
-							// We do not propagate the monitor update to the user as it would be for a monitor
-							// that we didn't manage to store (and that we don't care about - we don't respond
-							// with the funding_signed so the channel can never go on chain).
-							if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
-								res.0 = None;
-							}
-							res.map(|_| ())
 						} else {
 							unreachable!("This must be a funded channel as we just inserted it.");
 						}
+						Ok(())
 					} else {
 						log_error!(self.logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
 						return Err(MsgHandleErrInternal::send_err_msg_no_close(
@@ -5982,16 +5945,8 @@ where
 					let monitor = try_chan_phase_entry!(self,
 						chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_phase_entry);
 					if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
-						let mut res = handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
-						if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
-							// We weren't able to watch the channel to begin with, so no updates should be made on
-							// it. Previously, full_stack_target found an (unreachable) panic when the
-							// monitor update contained within `shutdown_finish` was applied.
-							if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
-								shutdown_finish.0.take();
-							}
-						}
-						res.map(|_| ())
+						handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
+						Ok(())
 					} else {
 						try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry)
 					}
@@ -6096,8 +6051,8 @@ where
 					}
 					// Update the monitor with the shutdown script if necessary.
 					if let Some(monitor_update) = monitor_update_opt {
-						break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-							peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+						handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+							peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
 					}
 					break Ok(());
 				},
@@ -6350,8 +6305,9 @@ where
 						let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_phase_entry);
 						if let Some(monitor_update) = monitor_update_opt {
 							handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
-								peer_state, per_peer_state, chan_phase_entry).map(|_| ())
-						} else { Ok(()) }
+								peer_state, per_peer_state, chan_phase_entry);
+						}
+						Ok(())
 					} else {
 						return try_chan_phase_entry!(self, Err(ChannelError::Close(
 							"Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
@@ -6519,13 +6475,13 @@ where
 						} else { false };
 						let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
 							chan.revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan_phase_entry);
-						let res = if let Some(monitor_update) = monitor_update_opt {
+						if let Some(monitor_update) = monitor_update_opt {
 							let funding_txo = funding_txo_opt
 								.expect("Funding outpoint must have been set for RAA handling to succeed");
 							handle_new_monitor_update!(self, funding_txo, monitor_update,
-								peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ())
-						} else { Ok(()) };
-						(htlcs_to_fail, res)
+								peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
+						}
+						(htlcs_to_fail, Ok(()))
 					} else {
 						return try_chan_phase_entry!(self, Err(ChannelError::Close(
 							"Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
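After this change both the `commitment_signed` and `revoke_and_ack` handlers return `Ok(())` unconditionally once the optional update has been handed to the macro, since the in-flight case is tracked internally rather than surfaced as an error. A simplified sketch of the handler shape (hypothetical signature, not LDK's):

```rust
fn handle_revoke_and_ack(monitor_update_opt: Option<&str>) -> (Vec<u64>, Result<(), ()>) {
	let htlcs_to_fail = Vec::new();
	if let Some(update) = monitor_update_opt {
		// Before: the handler's Result mirrored the update's Result.
		// After: the update is handled internally and we always return Ok.
		println!("dispatched {update}");
	}
	(htlcs_to_fail, Ok(()))
}

fn main() {
	assert!(handle_revoke_and_ack(Some("monitor-update")).1.is_ok());
	assert!(handle_revoke_and_ack(None).1.is_ok());
}
```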
@@ -6796,7 +6752,6 @@ where
 	fn check_free_holding_cells(&self) -> bool {
 		let mut has_monitor_update = false;
 		let mut failed_htlcs = Vec::new();
-		let mut handle_errors = Vec::new();
 
 		// Walk our list of channels and find any that need to update. Note that when we do find an
 		// update, if it includes actions that must be taken afterwards, we have to drop the
@@ -6822,12 +6777,9 @@ where
68226777 has_monitor_update = true;
68236778
68246779 let channel_id: ChannelId = *channel_id;
6825- let res = handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
6780+ handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
68266781 peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
68276782 peer_state.channel_by_id.remove(&channel_id));
6828- if res.is_err() {
6829- handle_errors.push((counterparty_node_id, res));
6830- }
68316783 continue 'peer_loop;
68326784 }
68336785 }
@@ -6837,15 +6789,11 @@ where
 			break 'peer_loop;
 		}
 
-		let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
+		let has_update = has_monitor_update || !failed_htlcs.is_empty();
 		for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
 			self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
 		}
 
-		for (counterparty_node_id, err) in handle_errors.drain(..) {
-			let _ = handle_error!(self, err, counterparty_node_id);
-		}
-
 		has_update
 	}
 
@@ -7172,11 +7120,8 @@ where
 					if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
 						log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
 							channel_funding_outpoint.to_channel_id());
-						if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
-							peer_state_lck, peer_state, per_peer_state, chan_phase_entry)
-						{
-							errors.push((e, counterparty_node_id));
-						}
+						handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
+							peer_state_lck, peer_state, per_peer_state, chan_phase_entry);
 						if further_update_exists {
 							// If there are more `ChannelMonitorUpdate`s to process, restart at the
 							// top of the loop.
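When an unblocked update is applied and further updates remain queued behind it, processing restarts from the top so each subsequent update is re-checked against fresh channel state. A toy model of that drain-and-restart loop, with invented types:

```rust
use std::collections::VecDeque;

fn process_blocked_updates(mut blocked: VecDeque<&str>) {
	'process: loop {
		if let Some(update) = blocked.pop_front() {
			let further_update_exists = !blocked.is_empty();
			println!("applying {update}");
			if further_update_exists {
				// More `ChannelMonitorUpdate`s to process: restart at the top.
				continue 'process;
			}
		}
		break;
	}
}

fn main() {
	process_blocked_updates(VecDeque::from(vec!["a", "b", "c"]));
}
```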