@@ -1989,11 +1989,11 @@ macro_rules! handle_new_monitor_update {
 			ChannelMonitorUpdateStatus::InProgress => {
 				log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
 					&$chan.context.channel_id());
-				Ok(false)
+				false
 			},
 			ChannelMonitorUpdateStatus::Completed => {
 				$completed;
-				Ok(true)
+				true
 			},
 		}
 	} };
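
[Annotation, not part of the patch] With the `Ok(..)` wrappers gone, this macro arm now evaluates to a plain bool rather than a Result. A minimal sketch of the new contract, assuming (as the match above does) that only these two statuses can reach this point:

    use lightning::chain::ChannelMonitorUpdateStatus;

    /// false: the update is still persisting asynchronously, so outbound
    /// messages stay held until the monitor update completes. true: the
    /// update persisted synchronously and `$completed` has already run.
    fn update_complete(status: ChannelMonitorUpdateStatus) -> bool {
        match status {
            ChannelMonitorUpdateStatus::InProgress => false,
            ChannelMonitorUpdateStatus::Completed => true,
            // Assumption: any fatal persistence status is handled before
            // this point, mirroring the two-arm match in the macro.
            _ => panic!("unexpected ChannelMonitorUpdateStatus"),
        }
    }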
@@ -2008,14 +2008,9 @@ macro_rules! handle_new_monitor_update {
 				$per_peer_state_lock, chan, MANUALLY_REMOVING_INITIAL_MONITOR, { $chan_entry.remove() })
 		} else {
 			// We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
-			// update).
-			debug_assert!(false);
-			let channel_id = *$chan_entry.key();
-			let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
-				"Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
-				$chan_entry.get_mut(), &channel_id);
-			$chan_entry.remove();
-			Err(err)
+			// update). Throwing away a monitor update could be dangerous, so we assert even in
+			// release builds.
+			panic!("Initial Monitors should not exist for non-funded channels");
 		}
 	};
 	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
@@ -2045,14 +2040,9 @@ macro_rules! handle_new_monitor_update {
 				$per_peer_state_lock, chan, MANUALLY_REMOVING, { $chan_entry.remove() })
 		} else {
 			// We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
-			// update).
-			debug_assert!(false);
-			let channel_id = *$chan_entry.key();
-			let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
-				"Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
-				$chan_entry.get_mut(), &channel_id);
-			$chan_entry.remove();
-			Err(err)
+			// update). Throwing away a monitor update could be dangerous, so we assert even in
+			// release builds.
+			panic!("Monitor updates should not exist for non-funded channels");
 		}
 	}
 }
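
[Annotation, not part of the patch] The new comment's rationale in miniature: `debug_assert!(false)` compiles to a no-op in release builds, so the old code could fall through and quietly turn a discarded monitor update into a channel close. The `panic!` fires in every build profile. A toy illustration:

    fn old_guard() {
        // Compiled out under --release: execution continued past this line
        // and the update was converted into a close plus an Err return.
        debug_assert!(false, "unfunded channel has no monitor");
    }

    fn new_guard() -> ! {
        // Fires in debug and release alike, so a dropped monitor update
        // can never pass unnoticed.
        panic!("Monitor updates should not exist for non-funded channels")
    }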
@@ -2458,7 +2448,7 @@ where
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
 		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
-		let result: Result<(), _> = loop {
+		loop {
 			{
 				let per_peer_state = self.per_peer_state.read().unwrap();
 
@@ -2487,8 +2477,9 @@ where
 
 							// Update the monitor with the shutdown script if necessary.
 							if let Some(monitor_update) = monitor_update_opt.take() {
-								break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-									peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+								handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+									peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
+								break;
 							}
 
 							if chan.is_shutdown() {
@@ -2501,7 +2492,7 @@ where
 									self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
 								}
 							}
-							break Ok(());
+							break;
 						}
 					},
 					hash_map::Entry::Vacant(_) => (),
@@ -2520,7 +2511,6 @@ where
 			self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
 		}
 
-		let _ = handle_error!(self, result, *counterparty_node_id);
 		Ok(())
 	}
 
@@ -3251,8 +3241,7 @@ where
 					match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
 						Some(monitor_update) => {
 							match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan_phase_entry) {
-								Err(e) => break Err(e),
-								Ok(false) => {
+								false => {
 									// Note that MonitorUpdateInProgress here indicates (per function
 									// docs) that we will resend the commitment update once monitor
 									// updating completes. Therefore, we must return an error
@@ -3261,7 +3250,7 @@ where
 									// MonitorUpdateInProgress, below.
 									return Err(APIError::MonitorUpdateInProgress);
 								},
-								Ok(true) => {},
+								true => {},
 							}
 						},
 						None => {},
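
[Annotation, not part of the patch] After this change, `APIError::MonitorUpdateInProgress` is the only monitor-related error left on the send path, and per the comment above it means "queued behind a pending monitor update", not "failed". A hedged sketch of caller-side handling (the function name and wiring here are hypothetical, not from this patch):

    use lightning::util::errors::APIError;

    fn on_send_result(res: Result<(), APIError>) {
        match res {
            // The commitment update will be resent automatically once the
            // monitor update completes; treat the payment as pending.
            Err(APIError::MonitorUpdateInProgress) => { /* wait for events */ },
            Err(e) => eprintln!("send failed: {:?}", e),
            Ok(()) => {},
        }
    }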
@@ -4330,7 +4319,7 @@ where
 				},
 				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
 					let mut updated_chan = false;
-					let res = {
+					{
 						let per_peer_state = self.per_peer_state.read().unwrap();
 						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
 							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -4339,24 +4328,16 @@ where
 								hash_map::Entry::Occupied(mut chan_phase) => {
 									updated_chan = true;
 									handle_new_monitor_update!(self, funding_txo, update.clone(),
-										peer_state_lock, peer_state, per_peer_state, chan_phase).map(|_| ())
+										peer_state_lock, peer_state, per_peer_state, chan_phase);
 								},
-								hash_map::Entry::Vacant(_) => Ok(()),
+								hash_map::Entry::Vacant(_) => {},
 							}
-						} else { Ok(()) }
-					};
+						}
+					}
 					if !updated_chan {
 						// TODO: Track this as in-flight even though the channel is closed.
 						let _ = self.chain_monitor.update_channel(funding_txo, &update);
 					}
-					// TODO: If this channel has since closed, we're likely providing a payment
-					// preimage update, which we must ensure is durable! We currently don't,
-					// however, ensure that.
-					if res.is_err() {
-						log_error!(self.logger,
-							"Failed to provide ChannelMonitorUpdate to closed channel! This likely lost us a payment preimage!");
-					}
-					let _ = handle_error!(self, res, counterparty_node_id);
 				},
 				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
 					let per_peer_state = self.per_peer_state.read().unwrap();
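
[Annotation, not part of the patch] The closed-channel fallback kept above now discards the persist status on purpose: with no live channel state machine left to pause, `InProgress` vs. `Completed` changes nothing at this call site, which is why the `res`-based logging below it could be dropped. Sketch of that fallback, mirroring the kept line:

    // chain::Watch::update_channel returns a ChannelMonitorUpdateStatus;
    // for a channel no longer present in channel_by_id we only need the
    // update delivered, so the status is intentionally ignored. The
    // remaining TODO notes it should still be tracked as in-flight.
    let _ = self.chain_monitor.update_channel(funding_txo, &update);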
@@ -5079,15 +5060,8 @@ where
 						peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
 					}
 					if !during_init {
-						let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+						handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
 							peer_state, per_peer_state, chan_phase_entry);
-						if let Err(e) = res {
-							// TODO: This is a *critical* error - we probably updated the outbound edge
-							// of the HTLC's monitor with a preimage. We should retry this monitor
-							// update over and over again until morale improves.
-							log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
-							return Err((counterparty_node_id, e));
-						}
 					} else {
 						// If we're running during init we cannot update a monitor directly -
 						// they probably haven't actually been loaded yet. Instead, push the
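
[Annotation, not part of the patch] The `else` branch whose comment begins above queues the update instead of applying it. Assumed shape of that fallback, reusing the `BackgroundEvent` variant visible earlier in this diff (the queue field name is assumed from context):

    // During init the ChannelMonitors may not be loaded yet, so the
    // preimage-bearing update is queued and replayed later by the
    // MonitorUpdateRegeneratedOnStartup handling shown in the earlier hunk.
    self.pending_background_events.lock().unwrap().push(
        BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
            counterparty_node_id,
            funding_txo: prev_hop.outpoint,
            update: monitor_update,
        });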
@@ -5734,24 +5708,13 @@ where
 				});
 
 				if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
-					let mut res = handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
+					handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
 						per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
 						{ peer_state.channel_by_id.remove(&new_channel_id) });
-
-					// Note that we reply with the new channel_id in error messages if we gave up on the
-					// channel, not the temporary_channel_id. This is compatible with ourselves, but the
-					// spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
-					// any messages referencing a previously-closed channel anyway.
-					// We do not propagate the monitor update to the user as it would be for a monitor
-					// that we didn't manage to store (and that we don't care about - we don't respond
-					// with the funding_signed so the channel can never go on chain).
-					if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
-						res.0 = None;
-					}
-					res.map(|_| ())
 				} else {
 					unreachable!("This must be a funded channel as we just inserted it.");
 				}
+				Ok(())
 			} else {
 				log_error!(self.logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
 				return Err(MsgHandleErrInternal::send_err_msg_no_close(
@@ -5782,16 +5745,8 @@ where
 				let monitor = try_chan_phase_entry!(self,
 					chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_phase_entry);
 				if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
-					let mut res = handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
-					if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
-						// We weren't able to watch the channel to begin with, so no updates should be made on
-						// it. Previously, full_stack_target found an (unreachable) panic when the
-						// monitor update contained within `shutdown_finish` was applied.
-						if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
-							shutdown_finish.0.take();
-						}
-					}
-					res.map(|_| ())
+					handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
+					Ok(())
 				} else {
 					try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry)
 				}
@@ -5894,8 +5849,8 @@ where
 					}
 					// Update the monitor with the shutdown script if necessary.
 					if let Some(monitor_update) = monitor_update_opt {
-						break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-							peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+						handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+							peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
 					}
 					break Ok(());
 				},
@@ -6141,8 +6096,9 @@ where
 					let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_phase_entry);
 					if let Some(monitor_update) = monitor_update_opt {
 						handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
-							peer_state, per_peer_state, chan_phase_entry).map(|_| ())
-					} else { Ok(()) }
+							peer_state, per_peer_state, chan_phase_entry);
+					}
+					Ok(())
 				} else {
 					return try_chan_phase_entry!(self, Err(ChannelError::Close(
 						"Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
@@ -6310,13 +6266,13 @@ where
 					} else { false };
 					let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
 						chan.revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan_phase_entry);
-					let res = if let Some(monitor_update) = monitor_update_opt {
+					if let Some(monitor_update) = monitor_update_opt {
 						let funding_txo = funding_txo_opt
 							.expect("Funding outpoint must have been set for RAA handling to succeed");
 						handle_new_monitor_update!(self, funding_txo, monitor_update,
-							peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ())
-					} else { Ok(()) };
-					(htlcs_to_fail, res)
+							peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
+					}
+					(htlcs_to_fail, Ok(()))
 				} else {
 					return try_chan_phase_entry!(self, Err(ChannelError::Close(
 						"Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
@@ -6585,7 +6541,6 @@ where
 	fn check_free_holding_cells(&self) -> bool {
 		let mut has_monitor_update = false;
 		let mut failed_htlcs = Vec::new();
-		let mut handle_errors = Vec::new();
 
 		// Walk our list of channels and find any that need to update. Note that when we do find an
 		// update, if it includes actions that must be taken afterwards, we have to drop the
@@ -6611,12 +6566,9 @@ where
 						has_monitor_update = true;
 
 						let channel_id: ChannelId = *channel_id;
-						let res = handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
+						handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
 							peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
 							peer_state.channel_by_id.remove(&channel_id));
-						if res.is_err() {
-							handle_errors.push((counterparty_node_id, res));
-						}
 						continue 'peer_loop;
 					}
 				}
@@ -6626,15 +6578,11 @@ where
 			break 'peer_loop;
 		}
 
-		let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
+		let has_update = has_monitor_update || !failed_htlcs.is_empty();
 		for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
 			self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
 		}
 
-		for (counterparty_node_id, err) in handle_errors.drain(..) {
-			let _ = handle_error!(self, err, counterparty_node_id);
-		}
-
 		has_update
 	}
 
@@ -6961,11 +6909,8 @@ where
 				if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
 					log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
 						channel_funding_outpoint.to_channel_id());
-					if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
-						peer_state_lck, peer_state, per_peer_state, chan_phase_entry)
-					{
-						errors.push((e, counterparty_node_id));
-					}
+					handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
+						peer_state_lck, peer_state, per_peer_state, chan_phase_entry);
 					if further_update_exists {
 						// If there are more `ChannelMonitorUpdate`s to process, restart at the
 						// top of the loop.
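
[Annotation, not part of the patch] With the `errors` push gone, the surrounding drain loop reduces to straightforward control flow. A simplified sketch (`apply_update` stands in for the macro invocation; names otherwise follow the hunk):

    // Release blocked monitor updates one at a time, restarting while the
    // channel reports that more remain queued behind this one.
    loop {
        match chan.unblock_next_blocked_monitor_update() {
            Some((monitor_update, further_update_exists)) => {
                apply_update(monitor_update);
                if !further_update_exists { break; }
            },
            None => break,
        }
    }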