
Commit afe8af8

Always process claim ChannelMonitorUpdates asynchronously
We currently have two codepaths on most channel update functions - most methods return a set of messages to send a peer iff the `ChannelMonitorUpdate` succeeds, but if it does not we push the messages back into the `Channel` and then pull them back out when the `ChannelMonitorUpdate` completes and send them then.

This adds a substantial amount of complexity in very critical codepaths.

Instead, here we swap all our channel update codepaths to immediately set the channel-update-required flag and only return a `ChannelMonitorUpdate` to the `ChannelManager`. Internally in the `Channel` we store a queue of `ChannelMonitorUpdate`s, which will become critical in future work to surface pending `ChannelMonitorUpdate`s to users at startup so they can complete.

This leaves some redundant work in `Channel` to be cleaned up later. Specifically, we still generate the messages which we will now ignore and regenerate later.

This commit updates the `ChannelMonitorUpdate` pipeline for `claim_funds_from_hop`, ie the `update_fulfill_htlc`-generation pipeline.
1 parent bb0db2a commit afe8af8
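
To make the new flow concrete, here is a minimal, self-contained sketch of the pattern the commit message describes: the channel queues every `ChannelMonitorUpdate` it generates, marks itself as needing a monitor update, and hands the manager only a reference to the queued update; the deferred messages are regenerated once the update completes. The types and method names below (`queue_fulfill_update`, `monitor_update_completed`, the `monitor_pending_commitment_signed` flag) are simplified stand-ins for illustration, not LDK's actual API.

#[derive(Debug)]
struct ChannelMonitorUpdate {
    update_id: u64,
    updates: Vec<String>, // stand-in for LDK's list of ChannelMonitorUpdateStep values
}

struct Channel {
    latest_monitor_update_id: u64,
    // The queue: every generated update is pushed here, and only a
    // reference is handed back to the manager.
    pending_monitor_updates: Vec<ChannelMonitorUpdate>,
    // Stand-in for the "channel update required" flag.
    monitor_pending_commitment_signed: bool,
}

impl Channel {
    // Queue a claim-generated update, pause outbound message generation, and
    // return a borrow of the queued update for the caller to persist.
    fn queue_fulfill_update(&mut self, preimage: &str) -> &ChannelMonitorUpdate {
        self.latest_monitor_update_id += 1;
        let update = ChannelMonitorUpdate {
            update_id: self.latest_monitor_update_id,
            updates: vec![format!("PaymentPreimage({preimage})")],
        };
        // Remember that an update_fulfill_htlc + commitment_signed must be
        // regenerated once the monitor update completes.
        self.monitor_pending_commitment_signed = true;
        self.pending_monitor_updates.push(update);
        self.pending_monitor_updates.last().unwrap()
    }

    // Called once the ChannelMonitorUpdate has been persisted; the queued entry
    // is dropped and the deferred messages can be regenerated and sent.
    fn monitor_update_completed(&mut self, update_id: u64) {
        self.pending_monitor_updates.retain(|upd| upd.update_id != update_id);
        if self.pending_monitor_updates.is_empty() {
            self.monitor_pending_commitment_signed = false;
        }
    }
}

fn main() {
    let mut chan = Channel {
        latest_monitor_update_id: 0,
        pending_monitor_updates: Vec::new(),
        monitor_pending_commitment_signed: false,
    };
    let queued = chan.queue_fulfill_update("preimage_1");
    println!("queued: {:?}", queued);
    let update_id = queued.update_id;
    // The manager persists the update asynchronously and later signals completion.
    chan.monitor_update_completed(update_id);
    assert!(chan.pending_monitor_updates.is_empty());
    assert!(!chan.monitor_pending_commitment_signed);
}
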

4 files changed: +107, -121 lines

lightning/src/chain/chainmonitor.rs

Lines changed: 17 additions & 3 deletions
@@ -796,7 +796,7 @@ mod tests {
     use crate::ln::functional_test_utils::*;
     use crate::ln::msgs::ChannelMessageHandler;
     use crate::util::errors::APIError;
-    use crate::util::events::{ClosureReason, MessageSendEvent, MessageSendEventsProvider};
+    use crate::util::events::{Event, ClosureReason, MessageSendEvent, MessageSendEventsProvider};
 
     #[test]
     fn test_async_ooo_offchain_updates() {
@@ -819,10 +819,8 @@ mod tests {
 
         nodes[1].node.claim_funds(payment_preimage_1);
         check_added_monitors!(nodes[1], 1);
-        expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
         nodes[1].node.claim_funds(payment_preimage_2);
         check_added_monitors!(nodes[1], 1);
-        expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
 
         let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
         assert_eq!(persistences.len(), 1);
@@ -850,8 +848,24 @@ mod tests {
             .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
         assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
         nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
 
+        let claim_events = nodes[1].node.get_and_clear_pending_events();
+        assert_eq!(claim_events.len(), 2);
+        match claim_events[0] {
+            Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
+                assert_eq!(payment_hash_1, *payment_hash);
+            },
+            _ => panic!("Unexpected event"),
+        }
+        match claim_events[1] {
+            Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
+                assert_eq!(payment_hash_2, *payment_hash);
+            },
+            _ => panic!("Unexpected event"),
+        }
+
         // Now manually walk the commitment signed dance - because we claimed two payments
         // back-to-back it doesn't fit into the neat walk commitment_signed_dance does.
 

lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 22 additions & 10 deletions
@@ -1602,7 +1602,6 @@ fn test_monitor_update_fail_claim() {
 
     chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
     nodes[1].node.claim_funds(payment_preimage_1);
-    expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
     assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
     check_added_monitors!(nodes[1], 1);
 
@@ -1628,6 +1627,7 @@ fn test_monitor_update_fail_claim() {
     let events = nodes[1].node.get_and_clear_pending_msg_events();
     assert_eq!(events.len(), 0);
     commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
+    expect_pending_htlcs_forwardable_ignore!(nodes[1]);
 
     let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
     nodes[2].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
@@ -1645,6 +1645,7 @@ fn test_monitor_update_fail_claim() {
     let channel_id = chan_1.2;
     let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
     nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+    expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
     check_added_monitors!(nodes[1], 0);
 
     let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -1653,7 +1654,7 @@ fn test_monitor_update_fail_claim() {
     expect_payment_sent!(nodes[0], payment_preimage_1);
 
     // Get the payment forwards, note that they were batched into one commitment update.
-    expect_pending_htlcs_forwardable!(nodes[1]);
+    nodes[1].node.process_pending_htlc_forwards();
     check_added_monitors!(nodes[1], 1);
     let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
     nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
@@ -1802,14 +1803,14 @@ fn monitor_update_claim_fail_no_response() {
 
     chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
     nodes[1].node.claim_funds(payment_preimage_1);
-    expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
     check_added_monitors!(nodes[1], 1);
 
     assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
     chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
     let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
     nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+    expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
     check_added_monitors!(nodes[1], 0);
     assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -2289,7 +2290,6 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
     chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
     nodes[0].node.claim_funds(payment_preimage_0);
     check_added_monitors!(nodes[0], 1);
-    expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
 
     nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
     nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
@@ -2352,6 +2352,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
     chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
     let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
    nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
+    expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
 
     // New outbound messages should be generated immediately upon a call to
     // get_and_clear_pending_msg_events (but not before).
@@ -2650,15 +2651,13 @@ fn double_temp_error() {
     // `claim_funds` results in a ChannelMonitorUpdate.
     nodes[1].node.claim_funds(payment_preimage_1);
     check_added_monitors!(nodes[1], 1);
-    expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
     let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
 
     chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
     // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
     // which had some asserts that prevented it from being called twice.
     nodes[1].node.claim_funds(payment_preimage_2);
     check_added_monitors!(nodes[1], 1);
-    expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
     chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
     let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
@@ -2667,11 +2666,24 @@ fn double_temp_error() {
     check_added_monitors!(nodes[1], 0);
     nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2);
 
-    // Complete the first HTLC.
-    let events = nodes[1].node.get_and_clear_pending_msg_events();
-    assert_eq!(events.len(), 1);
+    // Complete the first HTLC. Note that as a side-effect we handle the monitor update completions
+    // and get both PaymentClaimed events at once.
+    let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+
+    let events = nodes[1].node.get_and_clear_pending_events();
+    assert_eq!(events.len(), 2);
+    match events[0] {
+        Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_1),
+        _ => panic!("Unexpected Event: {:?}", events[0]),
+    }
+    match events[1] {
+        Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_2),
+        _ => panic!("Unexpected Event: {:?}", events[1]),
+    }
+
+    assert_eq!(msg_events.len(), 1);
     let (update_fulfill_1, commitment_signed_b1, node_id) = {
-        match &events[0] {
+        match &msg_events[0] {
             &MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
                 assert!(update_add_htlcs.is_empty());
                 assert_eq!(update_fulfill_htlcs.len(), 1);

lightning/src/ln/channel.rs

Lines changed: 21 additions & 16 deletions
@@ -393,18 +393,15 @@ enum UpdateFulfillFetch {
 }
 
 /// The return type of get_update_fulfill_htlc_and_commit.
-pub enum UpdateFulfillCommitFetch {
+pub enum UpdateFulfillCommitFetch<'a> {
     /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
     /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
     /// previously placed in the holding cell (and has since been removed).
     NewClaim {
         /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
-        monitor_update: ChannelMonitorUpdate,
+        monitor_update: &'a ChannelMonitorUpdate,
         /// The value of the HTLC which was claimed, in msat.
         htlc_value_msat: u64,
-        /// The update_fulfill message and commitment_signed message (if the claim was not placed
-        /// in the holding cell).
-        msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
     },
     /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
     /// or has been forgotten (presumably previously claimed).
@@ -1968,22 +1965,30 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
         }
     }
 
-    pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, (ChannelError, ChannelMonitorUpdate)> where L::Target: Logger {
+    pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
         match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
-            UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(update_fulfill_htlc) } => {
-                let (commitment, mut additional_update) = match self.send_commitment_no_status_check(logger) {
-                    Err(e) => return Err((e, monitor_update)),
-                    Ok(res) => res
-                };
-                // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+            UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(_) } => {
+                let mut additional_update = self.build_commitment_no_status_check(logger);
+                // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
                 // strictly increasing by one, so decrement it here.
                 self.latest_monitor_update_id = monitor_update.update_id;
                 monitor_update.updates.append(&mut additional_update.updates);
-                Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: Some((update_fulfill_htlc, commitment)) })
+                self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+                self.pending_monitor_updates.push(monitor_update);
+                UpdateFulfillCommitFetch::NewClaim {
+                    monitor_update: self.pending_monitor_updates.last().unwrap(),
+                    htlc_value_msat,
+                }
            },
-            UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } =>
-                Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: None }),
-            UpdateFulfillFetch::DuplicateClaim {} => Ok(UpdateFulfillCommitFetch::DuplicateClaim {}),
+            UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } => {
+                self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+                self.pending_monitor_updates.push(monitor_update);
+                UpdateFulfillCommitFetch::NewClaim {
+                    monitor_update: self.pending_monitor_updates.last().unwrap(),
+                    htlc_value_msat,
+                }
+            }
+            UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
         }
     }
 
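Why the return type now carries a lifetime can be seen in a toy example: the `NewClaim` variant borrows the update out of the channel's own queue rather than moving it out, so the caller can read and persist the queued update while the channel retains ownership until the update completes. The names below (`FulfillFetch`, `Chan`, `MonitorUpdate`) are illustrative only, not LDK types.

struct MonitorUpdate { update_id: u64 }

enum FulfillFetch<'a> {
    // Borrows the queued update instead of handing out an owned copy.
    NewClaim { monitor_update: &'a MonitorUpdate, htlc_value_msat: u64 },
    DuplicateClaim,
}

struct Chan { pending: Vec<MonitorUpdate>, next_id: u64 }

impl Chan {
    fn fulfill(&mut self, value_msat: u64) -> FulfillFetch<'_> {
        self.next_id += 1;
        self.pending.push(MonitorUpdate { update_id: self.next_id });
        FulfillFetch::NewClaim {
            monitor_update: self.pending.last().unwrap(),
            htlc_value_msat: value_msat,
        }
    }
}

fn main() {
    let mut chan = Chan { pending: Vec::new(), next_id: 0 };
    // The caller can inspect (e.g. persist) the queued update, but the channel keeps it
    // in its queue until the corresponding monitor update completes.
    if let FulfillFetch::NewClaim { monitor_update, htlc_value_msat } = chan.fulfill(1_000_000) {
        println!("queued update {} for {} msat", monitor_update.update_id, htlc_value_msat);
    }
}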

lightning/src/ln/channelmanager.rs

Lines changed: 47 additions & 92 deletions
@@ -4026,7 +4026,6 @@ where
 
         let per_peer_state = self.per_peer_state.read().unwrap();
         let chan_id = prev_hop.outpoint.to_channel_id();
-
         let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
             Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
             None => None
@@ -4038,99 +4037,57 @@ where
             )
         ).unwrap_or(None);
 
-        if let Some(hash_map::Entry::Occupied(mut chan)) = peer_state_opt.as_mut().map(|peer_state| peer_state.channel_by_id.entry(chan_id))
-        {
-            let counterparty_node_id = chan.get().get_counterparty_node_id();
-            match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
-                Ok(msgs_monitor_option) => {
-                    if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
-                        match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) {
-                            ChannelMonitorUpdateStatus::Completed => {},
-                            e => {
-                                log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
-                                    "Failed to update channel monitor with preimage {:?}: {:?}",
-                                    payment_preimage, e);
-                                let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err();
-                                mem::drop(peer_state_opt);
-                                mem::drop(per_peer_state);
-                                self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
-                                return Err((counterparty_node_id, err));
-                            }
-                        }
-                        if let Some((msg, commitment_signed)) = msgs {
-                            log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
-                                log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
-                            peer_state_opt.as_mut().unwrap().pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                node_id: counterparty_node_id,
-                                updates: msgs::CommitmentUpdate {
-                                    update_add_htlcs: Vec::new(),
-                                    update_fulfill_htlcs: vec![msg],
-                                    update_fail_htlcs: Vec::new(),
-                                    update_fail_malformed_htlcs: Vec::new(),
-                                    update_fee: None,
-                                    commitment_signed,
-                                }
-                            });
-                        }
-                        mem::drop(peer_state_opt);
-                        mem::drop(per_peer_state);
-                        self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
-                        Ok(())
-                    } else {
-                        Ok(())
-                    }
-                },
-                Err((e, monitor_update)) => {
-                    match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) {
-                        ChannelMonitorUpdateStatus::Completed => {},
-                        e => {
-                            // TODO: This needs to be handled somehow - if we receive a monitor update
-                            // with a preimage we *must* somehow manage to propagate it to the upstream
-                            // channel, or we must have an ability to receive the same update and try
-                            // again on restart.
-                            log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
-                                "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
-                                payment_preimage, e);
-                        },
+        if let Some(mut peer_state_lock) = peer_state_opt.take() {
+            let peer_state = &mut *peer_state_lock;
+            if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
+                let counterparty_node_id = chan.get().get_counterparty_node_id();
+                let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+
+                if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
+                    if let Some(action) = completion_action(Some(htlc_value_msat)) {
+                        log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+                            log_bytes!(chan_id), action);
+                        peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
                     }
-                    let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id);
-                    if drop {
-                        chan.remove_entry();
+                    let update_id = monitor_update.update_id;
+                    let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
+                    let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+                        peer_state, chan);
+                    if let Err(e) = res {
+                        // TODO: This is a *critical* error - we probably updated the outbound edge
+                        // of the HTLC's monitor with a preimage. We should retry this monitor
+                        // update over and over again until morale improves.
+                        log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+                        return Err((counterparty_node_id, e));
                    }
-                    mem::drop(peer_state_opt);
-                    mem::drop(per_peer_state);
-                    self.handle_monitor_update_completion_actions(completion_action(None));
-                    Err((counterparty_node_id, res))
-                },
-            }
-        } else {
-            let preimage_update = ChannelMonitorUpdate {
-                update_id: CLOSED_CHANNEL_UPDATE_ID,
-                updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
-                    payment_preimage,
-                }],
-            };
-            // We update the ChannelMonitor on the backward link, after
-            // receiving an `update_fulfill_htlc` from the forward link.
-            let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
-            if update_res != ChannelMonitorUpdateStatus::Completed {
-                // TODO: This needs to be handled somehow - if we receive a monitor update
-                // with a preimage we *must* somehow manage to propagate it to the upstream
-                // channel, or we must have an ability to receive the same event and try
-                // again on restart.
-                log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-                    payment_preimage, update_res);
+                }
+                return Ok(());
            }
-            mem::drop(peer_state_opt);
-            mem::drop(per_peer_state);
-            // Note that we do process the completion action here. This totally could be a
-            // duplicate claim, but we have no way of knowing without interrogating the
-            // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
-            // generally always allowed to be duplicative (and it's specifically noted in
-            // `PaymentForwarded`).
-            self.handle_monitor_update_completion_actions(completion_action(None));
-            Ok(())
        }
+        let preimage_update = ChannelMonitorUpdate {
+            update_id: CLOSED_CHANNEL_UPDATE_ID,
+            updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+                payment_preimage,
+            }],
+        };
+        // We update the ChannelMonitor on the backward link, after
+        // receiving an `update_fulfill_htlc` from the forward link.
+        let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
+        if update_res != ChannelMonitorUpdateStatus::Completed {
+            // TODO: This needs to be handled somehow - if we receive a monitor update
+            // with a preimage we *must* somehow manage to propagate it to the upstream
+            // channel, or we must have an ability to receive the same event and try
+            // again on restart.
+            log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+                payment_preimage, update_res);
+        }
+        // Note that we do process the completion action here. This totally could be a
+        // duplicate claim, but we have no way of knowing without interrogating the
+        // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
+        // generally always allowed to be duplicative (and it's specifically noted in
+        // `PaymentForwarded`).
+        self.handle_monitor_update_completion_actions(completion_action(None));
+        Ok(())
     }
 
     fn finalize_claims(&self, sources: Vec<HTLCSource>) {
@@ -7044,8 +7001,6 @@ where
         // LDK versions prior to 0.0.113 do not know how to read the pending claimed payments
         // map. Thus, if there are no entries we skip writing a TLV for it.
            pending_claiming_payments = None;
-        } else {
-            debug_assert!(false, "While we have code to serialize pending_claiming_payments, the map should always be empty until a later PR");
        }
 
        write_tlv_fields!(writer, {
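The test changes above all exercise the same observable behavior: `PaymentClaimed` only surfaces after the corresponding monitor update completes. Below is a simplified, self-contained sketch of that manager-side deferral; the names (`Manager`, `blocked_actions`, `CompletionAction`) are illustrative stand-ins and assume nothing about LDK internals beyond what the diff shows.

use std::collections::HashMap;

type ChannelId = [u8; 32];

enum CompletionAction {
    EmitPaymentClaimed { amount_msat: u64 },
}

#[derive(Default)]
struct Manager {
    // Actions blocked on an in-flight ChannelMonitorUpdate, keyed by channel.
    blocked_actions: HashMap<ChannelId, Vec<CompletionAction>>,
    pending_events: Vec<String>,
}

impl Manager {
    // Called while the ChannelMonitorUpdate is still being persisted.
    fn track_blocked_action(&mut self, chan_id: ChannelId, action: CompletionAction) {
        self.blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
    }

    // Called when the persister reports the update as completed.
    fn monitor_update_completed(&mut self, chan_id: ChannelId) {
        for action in self.blocked_actions.remove(&chan_id).unwrap_or_default() {
            match action {
                CompletionAction::EmitPaymentClaimed { amount_msat } =>
                    self.pending_events.push(format!("PaymentClaimed({amount_msat} msat)")),
            }
        }
    }
}

fn main() {
    let mut mgr = Manager::default();
    let chan_id = [42; 32];
    mgr.track_blocked_action(chan_id, CompletionAction::EmitPaymentClaimed { amount_msat: 1_000_000 });
    assert!(mgr.pending_events.is_empty()); // nothing surfaces while the update is in flight
    mgr.monitor_update_completed(chan_id);
    assert_eq!(mgr.pending_events.len(), 1);
}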
