@@ -1989,11 +1989,11 @@ macro_rules! handle_new_monitor_update {
ChannelMonitorUpdateStatus::InProgress => {
log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
&$chan.context.channel_id());
- Ok(false)
+ false
},
ChannelMonitorUpdateStatus::Completed => {
$completed;
- Ok(true)
+ true
},
}
} };
@@ -2008,14 +2008,9 @@ macro_rules! handle_new_monitor_update {
$per_peer_state_lock, chan, MANUALLY_REMOVING_INITIAL_MONITOR, { $chan_entry.remove() })
} else {
// We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
- // update).
- debug_assert!(false);
- let channel_id = *$chan_entry.key();
- let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
- "Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
- $chan_entry.get_mut(), &channel_id);
- $chan_entry.remove();
- Err(err)
+ // update). Throwing away a monitor update could be dangerous, so we assert even in
+ // release builds.
+ panic!("Initial Monitors should not exist for non-funded channels");
}
};
($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
@@ -2045,14 +2040,9 @@ macro_rules! handle_new_monitor_update {
$per_peer_state_lock, chan, MANUALLY_REMOVING, { $chan_entry.remove() })
} else {
// We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
- // update).
- debug_assert!(false);
- let channel_id = *$chan_entry.key();
- let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
- "Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
- $chan_entry.get_mut(), &channel_id);
- $chan_entry.remove();
- Err(err)
+ // update). Throwing away a monitor update could be dangerous, so we assert even in
+ // release builds.
+ panic!("Monitor updates should not exist for non-funded channels");
}
}
}
@@ -2458,7 +2448,7 @@ where
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
- let result: Result<(), _> = loop {
+ loop {
{
let per_peer_state = self.per_peer_state.read().unwrap();
@@ -2487,8 +2477,9 @@ where

// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update_opt.take() {
- break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+ handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
+ break;
}

if chan.is_shutdown() {
@@ -2501,7 +2492,7 @@ where
self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
}
}
- break Ok(());
+ break;
}
},
hash_map::Entry::Vacant(_) => (),
@@ -2520,7 +2511,6 @@ where
self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
}

- let _ = handle_error!(self, result, *counterparty_node_id);
Ok(())
}

@@ -3251,8 +3241,7 @@ where
match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
Some(monitor_update) => {
match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan_phase_entry) {
- Err(e) => break Err(e),
- Ok(false) => {
+ false => {
// Note that MonitorUpdateInProgress here indicates (per function
// docs) that we will resend the commitment update once monitor
// updating completes. Therefore, we must return an error
@@ -3261,7 +3250,7 @@ where
// MonitorUpdateInProgress, below.
return Err(APIError::MonitorUpdateInProgress);
},
- Ok(true) => {},
+ true => {},
}
},
None => {},
@@ -4330,7 +4319,7 @@ where
},
BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
let mut updated_chan = false;
- let res = {
+ {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -4339,24 +4328,16 @@ where
hash_map::Entry::Occupied(mut chan_phase) => {
updated_chan = true;
handle_new_monitor_update!(self, funding_txo, update.clone(),
- peer_state_lock, peer_state, per_peer_state, chan_phase).map(|_| ())
+ peer_state_lock, peer_state, per_peer_state, chan_phase);
},
- hash_map::Entry::Vacant(_) => Ok(()),
+ hash_map::Entry::Vacant(_) => {},
}
- } else { Ok(()) }
- };
+ }
+ }
if !updated_chan {
// TODO: Track this as in-flight even though the channel is closed.
let _ = self.chain_monitor.update_channel(funding_txo, &update);
}
- // TODO: If this channel has since closed, we're likely providing a payment
- // preimage update, which we must ensure is durable! We currently don't,
- // however, ensure that.
- if res.is_err() {
- log_error!(self.logger,
- "Failed to provide ChannelMonitorUpdate to closed channel! This likely lost us a payment preimage!");
- }
- let _ = handle_error!(self, res, counterparty_node_id);
},
BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
let per_peer_state = self.per_peer_state.read().unwrap();
@@ -5079,15 +5060,8 @@ where
peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
}
if !during_init {
- let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+ handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
peer_state, per_peer_state, chan_phase_entry);
- if let Err(e) = res {
- // TODO: This is a *critical* error - we probably updated the outbound edge
- // of the HTLC's monitor with a preimage. We should retry this monitor
- // update over and over again until morale improves.
- log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
- return Err((counterparty_node_id, e));
- }
} else {
// If we're running during init we cannot update a monitor directly -
// they probably haven't actually been loaded yet. Instead, push the
@@ -5734,24 +5708,13 @@ where
});

if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
- let mut res = handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
+ handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
{ peer_state.channel_by_id.remove(&new_channel_id) });
-
- // Note that we reply with the new channel_id in error messages if we gave up on the
- // channel, not the temporary_channel_id. This is compatible with ourselves, but the
- // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
- // any messages referencing a previously-closed channel anyway.
- // We do not propagate the monitor update to the user as it would be for a monitor
- // that we didn't manage to store (and that we don't care about - we don't respond
- // with the funding_signed so the channel can never go on chain).
- if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
- res.0 = None;
- }
- res.map(|_| ())
} else {
unreachable!("This must be a funded channel as we just inserted it.");
}
+ Ok(())
} else {
log_error!(self.logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
return Err(MsgHandleErrInternal::send_err_msg_no_close(
@@ -5782,16 +5745,8 @@ where
let monitor = try_chan_phase_entry!(self,
chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_phase_entry);
if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
- let mut res = handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
- if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
- // We weren't able to watch the channel to begin with, so no updates should be made on
- // it. Previously, full_stack_target found an (unreachable) panic when the
- // monitor update contained within `shutdown_finish` was applied.
- if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
- shutdown_finish.0.take();
- }
- }
- res.map(|_| ())
+ handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
+ Ok(())
} else {
try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry)
}
@@ -5894,8 +5849,8 @@ where
}
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update_opt {
- break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+ handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
}
break Ok(());
},
@@ -6141,8 +6096,9 @@ where
let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_phase_entry);
if let Some(monitor_update) = monitor_update_opt {
handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
- peer_state, per_peer_state, chan_phase_entry).map(|_| ())
- } else { Ok(()) }
+ peer_state, per_peer_state, chan_phase_entry);
+ }
+ Ok(())
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
"Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
@@ -6310,13 +6266,13 @@ where
} else { false };
let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
chan.revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan_phase_entry);
- let res = if let Some(monitor_update) = monitor_update_opt {
+ if let Some(monitor_update) = monitor_update_opt {
let funding_txo = funding_txo_opt
.expect("Funding outpoint must have been set for RAA handling to succeed");
handle_new_monitor_update!(self, funding_txo, monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ())
- } else { Ok(()) };
- (htlcs_to_fail, res)
+ peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
+ }
+ (htlcs_to_fail, Ok(()))
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
"Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
@@ -6585,7 +6541,6 @@ where
fn check_free_holding_cells(&self) -> bool {
let mut has_monitor_update = false;
let mut failed_htlcs = Vec::new();
- let mut handle_errors = Vec::new();

// Walk our list of channels and find any that need to update. Note that when we do find an
// update, if it includes actions that must be taken afterwards, we have to drop the
@@ -6611,12 +6566,9 @@ where
has_monitor_update = true;

let channel_id: ChannelId = *channel_id;
- let res = handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
+ handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
peer_state.channel_by_id.remove(&channel_id));
- if res.is_err() {
- handle_errors.push((counterparty_node_id, res));
- }
continue 'peer_loop;
}
}
@@ -6626,15 +6578,11 @@ where
break 'peer_loop;
}

- let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
+ let has_update = has_monitor_update || !failed_htlcs.is_empty();
for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
}

- for (counterparty_node_id, err) in handle_errors.drain(..) {
- let _ = handle_error!(self, err, counterparty_node_id);
- }
-
has_update
}

@@ -6961,11 +6909,8 @@ where
if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
channel_funding_outpoint.to_channel_id());
- if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
- peer_state_lck, peer_state, per_peer_state, chan_phase_entry)
- {
- errors.push((e, counterparty_node_id));
- }
+ handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
+ peer_state_lck, peer_state, per_peer_state, chan_phase_entry);
if further_update_exists {
// If there are more `ChannelMonitorUpdate`s to process, restart at the
// top of the loop.
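
Taken together, these hunks change `handle_new_monitor_update!` from evaluating to a `Result` that every caller had to thread through `handle_error!` into evaluating to a plain `bool`, with the impossible unfunded-channel arm turned into a `panic!`. Below is a minimal sketch of the new contract, using a hypothetical free function in place of the macro; the `Status` enum and `completed` closure stand in for `ChannelMonitorUpdateStatus` and the `$completed` expansion, and none of these names are the real LDK API.

// Simplified stand-in for the updated macro. `Status` mirrors
// `ChannelMonitorUpdateStatus`; `completed` mirrors the `$completed`
// expansion that runs when the update is durably applied.
enum Status { InProgress, Completed }

// Returns `true` once the update completed, `false` while it is still
// in flight (messages are held until the update finishes). An update
// against a channel with no monitor is a logic error, so it panics
// even in release builds rather than silently discarding the update.
fn handle_new_monitor_update(status: Option<Status>, completed: impl FnOnce()) -> bool {
    match status {
        Some(Status::InProgress) => false,
        Some(Status::Completed) => { completed(); true },
        // Previously this arm built a `ChannelError::Close` and
        // returned `Err`; throwing away a monitor update is dangerous.
        None => panic!("Monitor updates should not exist for non-funded channels"),
    }
}

fn main() {
    // Callers now match on the bool instead of unwrapping a Result.
    match handle_new_monitor_update(Some(Status::InProgress), || ()) {
        false => println!("update in flight; commitment resent on completion"),
        true => println!("update completed"),
    }
}

With this shape, the call sites that previously matched `Ok(false)`/`Ok(true)` match the bool directly, and the error plumbing removed above (`let res = ...`, `handle_errors`, `errors.push(...)`) has nothing left to carry.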