@@ -7702,24 +7702,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			(htlc_forwards, decode_update_add_htlcs)
 	}

-	fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
+	fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: &PublicKey) {
 		debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock

-		let counterparty_node_id = match counterparty_node_id {
-			Some(cp_id) => cp_id.clone(),
-			None => {
-				// TODO: Once we can rely on the counterparty_node_id from the
-				// monitor event, this and the outpoint_to_peer map should be removed.
-				let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
-				match outpoint_to_peer.get(funding_txo) {
-					Some(cp_id) => cp_id.clone(),
-					None => return,
-				}
-			}
-		};
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let mut peer_state_lock;
-		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+		let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
 		if peer_state_mutex_opt.is_none() { return }
 		peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
@@ -7730,7 +7718,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			pending.len()
 		} else { 0 };

-		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*channel_id), None);
+		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
 		log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
 			highest_applied_update_id, remaining_in_flight);

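Note on the two hunks above: `channel_monitor_updated` now takes the counterparty's `PublicKey` unconditionally, so the fallback lookup through `outpoint_to_peer` goes away and `per_peer_state` is indexed directly. A minimal, self-contained sketch of that shape, using toy `Manager`/`PeerState`/key types rather than LDK's real ones:

    use std::collections::HashMap;

    // Toy stand-ins for the real types, purely to illustrate the signature change.
    type PublicKey = [u8; 33];
    type OutPoint = (u64, u16);

    struct PeerState {
        in_flight_updates: usize,
    }

    struct Manager {
        per_peer_state: HashMap<PublicKey, PeerState>,
        // Only needed by the old code path as a fallback when the event carried no peer id.
        outpoint_to_peer: HashMap<OutPoint, PublicKey>,
    }

    impl Manager {
        // Old shape: optional id, with a fallback map lookup before touching per-peer state.
        fn monitor_updated_old(&mut self, funding_txo: &OutPoint, cp: Option<&PublicKey>) {
            let cp = match cp {
                Some(cp) => *cp,
                None => match self.outpoint_to_peer.get(funding_txo) {
                    Some(cp) => *cp,
                    None => return,
                },
            };
            if let Some(peer) = self.per_peer_state.get_mut(&cp) {
                peer.in_flight_updates = peer.in_flight_updates.saturating_sub(1);
            }
        }

        // New shape: the id is always provided, so the fallback (and eventually the
        // outpoint_to_peer map itself) can be dropped.
        fn monitor_updated_new(&mut self, _funding_txo: &OutPoint, cp: &PublicKey) {
            if let Some(peer) = self.per_peer_state.get_mut(cp) {
                peer.in_flight_updates = peer.in_flight_updates.saturating_sub(1);
            }
        }
    }

The same pattern repeats in the monitor-event and startup hunks below: once the id is guaranteed, the `Option` handling and the fallback map become dead weight.
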
@@ -9482,67 +9470,56 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			for monitor_event in monitor_events.drain(..) {
 				match monitor_event {
 					MonitorEvent::HTLCEvent(htlc_update) => {
-						let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
+						let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), Some(htlc_update.payment_hash));
 						if let Some(preimage) = htlc_update.payment_preimage {
 							log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
 							self.claim_funds_internal(htlc_update.source, preimage,
 								htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
-								false, counterparty_node_id, funding_outpoint, channel_id, None);
+								false, Some(counterparty_node_id), funding_outpoint, channel_id, None);
 						} else {
 							log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
-							let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
+							let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
 							let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
 							self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
 						}
 					},
 					MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
-						let counterparty_node_id_opt = match counterparty_node_id {
-							Some(cp_id) => Some(cp_id),
-							None => {
-								// TODO: Once we can rely on the counterparty_node_id from the
-								// monitor event, this and the outpoint_to_peer map should be removed.
-								let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
-								outpoint_to_peer.get(&funding_outpoint).cloned()
-							}
-						};
-						if let Some(counterparty_node_id) = counterparty_node_id_opt {
-							let per_peer_state = self.per_peer_state.read().unwrap();
-							if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-								let peer_state = &mut *peer_state_lock;
-								let pending_msg_events = &mut peer_state.pending_msg_events;
-								if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
-									let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
-										reason
-									} else {
-										ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
-									};
-									let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
-									let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
-									failed_channels.push(shutdown_res);
-									if let Some(funded_chan) = chan.as_funded() {
-										if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
-											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
-											pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate {
-												msg: update
-											});
-										}
-										pending_msg_events.push(MessageSendEvent::HandleError {
-											node_id: funded_chan.context.get_counterparty_node_id(),
-											action: msgs::ErrorAction::DisconnectPeer {
-												msg: Some(msgs::ErrorMessage {
-													channel_id: funded_chan.context.channel_id(),
-													data: reason.to_string()
-												})
-											},
+						let per_peer_state = self.per_peer_state.read().unwrap();
+						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+							let peer_state = &mut *peer_state_lock;
+							let pending_msg_events = &mut peer_state.pending_msg_events;
+							if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
+								let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
+									reason
+								} else {
+									ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
+								};
+								let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
+								let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
+								failed_channels.push(shutdown_res);
+								if let Some(funded_chan) = chan.as_funded() {
+									if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
+										let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+										pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate {
+											msg: update
 										});
 									}
+									pending_msg_events.push(MessageSendEvent::HandleError {
+										node_id: counterparty_node_id,
+										action: msgs::ErrorAction::DisconnectPeer {
+											msg: Some(msgs::ErrorMessage {
+												channel_id: funded_chan.context.channel_id(),
+												data: reason.to_string()
+											})
+										},
+									});
 								}
 							}
 						}
 					},
 					MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
-						self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
+						self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, &counterparty_node_id);
 					},
 				}
 			}
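The force-close arm above keeps the same entry-based channel removal as before; only the `counterparty_node_id_opt` resolution and one level of nesting disappear, and `HandleError` now reuses the event's `counterparty_node_id` instead of re-reading it from the channel context. For reference, a small self-contained sketch of the `hash_map::Entry::Occupied` mutate-then-remove pattern, with a toy `Channel` type and only an assumption about what `remove_channel_entry!` roughly does:

    use std::collections::{hash_map, HashMap};

    struct Channel {
        open: bool,
    }

    impl Channel {
        // Final mutation before the channel is dropped from the map.
        fn force_shutdown(&mut self) {
            self.open = false;
        }
    }

    // Mutate the channel and then remove it from the map via a single lookup. This is an
    // assumption about what remove_channel_entry! roughly wraps; the toy types are not LDK's.
    fn force_close(channels: &mut HashMap<u64, Channel>, channel_id: u64) -> Option<Channel> {
        if let hash_map::Entry::Occupied(mut chan_entry) = channels.entry(channel_id) {
            chan_entry.get_mut().force_shutdown();
            Some(chan_entry.remove())
        } else {
            None
        }
    }
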
@@ -13772,26 +13749,26 @@ where
 		for (channel_id, monitor) in args.channel_monitors.iter() {
 			if !channel_id_set.contains(channel_id) {
 				let mut should_queue_fc_update = false;
-				if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
-					// If the ChannelMonitor had any updates, we may need to update it further and
-					// thus track it in `closed_channel_monitor_update_ids`. If the channel never
-					// had any updates at all, there can't be any HTLCs pending which we need to
-					// claim.
-					// Note that a `ChannelMonitor` is created with `update_id` 0 and after we
-					// provide it with a closure update its `update_id` will be at 1.
-					if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
-						should_queue_fc_update = !monitor.no_further_updates_allowed();
-						let mut latest_update_id = monitor.get_latest_update_id();
-						if should_queue_fc_update {
-							latest_update_id += 1;
-						}
-						per_peer_state.entry(counterparty_node_id)
-							.or_insert_with(|| Mutex::new(empty_peer_state()))
-							.lock().unwrap()
-							.closed_channel_monitor_update_ids.entry(monitor.channel_id())
-							.and_modify(|v| *v = cmp::max(latest_update_id, *v))
-							.or_insert(latest_update_id);
+				let counterparty_node_id = monitor.get_counterparty_node_id();
+
+				// If the ChannelMonitor had any updates, we may need to update it further and
+				// thus track it in `closed_channel_monitor_update_ids`. If the channel never
+				// had any updates at all, there can't be any HTLCs pending which we need to
+				// claim.
+				// Note that a `ChannelMonitor` is created with `update_id` 0 and after we
+				// provide it with a closure update its `update_id` will be at 1.
+				if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
+					should_queue_fc_update = !monitor.no_further_updates_allowed();
+					let mut latest_update_id = monitor.get_latest_update_id();
+					if should_queue_fc_update {
+						latest_update_id += 1;
 					}
+					per_peer_state.entry(counterparty_node_id)
+						.or_insert_with(|| Mutex::new(empty_peer_state()))
+						.lock().unwrap()
+						.closed_channel_monitor_update_ids.entry(monitor.channel_id())
+						.and_modify(|v| *v = cmp::max(latest_update_id, *v))
+						.or_insert(latest_update_id);
 				}

 				if !should_queue_fc_update {
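The added block above records, per closed channel, the highest monitor update id seen so far. A tiny standalone sketch of that `entry().and_modify().or_insert()` max-tracking pattern, with plain `u64` keys standing in for LDK's channel ids:

    use std::cmp;
    use std::collections::HashMap;

    // Track the highest monitor update id seen for each closed channel.
    fn note_closed_channel_update_id(ids: &mut HashMap<u64, u64>, channel_id: u64, latest_update_id: u64) {
        ids.entry(channel_id)
            .and_modify(|v| *v = cmp::max(latest_update_id, *v))
            .or_insert(latest_update_id);
    }

    fn main() {
        let mut ids = HashMap::new();
        note_closed_channel_update_id(&mut ids, 7, 3);
        note_closed_channel_update_id(&mut ids, 7, 1); // a lower id never overwrites a higher one
        assert_eq!(ids[&7], 3);
    }
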
@@ -13802,31 +13779,20 @@ where
 				let channel_id = monitor.channel_id();
 				log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
 					&channel_id);
-				let mut monitor_update = ChannelMonitorUpdate {
+				let monitor_update = ChannelMonitorUpdate {
 					update_id: monitor.get_latest_update_id().saturating_add(1),
-					counterparty_node_id: None,
+					counterparty_node_id: Some(counterparty_node_id),
 					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
 					channel_id: Some(monitor.channel_id()),
 				};
 				let funding_txo = monitor.get_funding_txo();
-				if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
-					let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-						counterparty_node_id,
-						funding_txo,
-						channel_id,
-						update: monitor_update,
-					};
-					close_background_events.push(update);
-				} else {
-					// This is a fairly old `ChannelMonitor` that hasn't seen an update to its
-					// off-chain state since LDK 0.0.118 (as in LDK 0.0.119 any off-chain
-					// `ChannelMonitorUpdate` will set the counterparty ID).
-					// Thus, we assume that it has no pending HTLCs and we will not need to
-					// generate a `ChannelMonitorUpdate` for it aside from this
-					// `ChannelForceClosed` one.
-					monitor_update.update_id = u64::MAX;
-					close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, channel_id, monitor_update)));
-				}
+				let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+					counterparty_node_id,
+					funding_txo,
+					channel_id,
+					update: monitor_update,
+				};
+				close_background_events.push(update);
 			}
 		}

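With `get_counterparty_node_id()` now returning a plain `PublicKey`, the legacy `ClosedMonitorUpdateRegeneratedOnStartup` branch and its `u64::MAX` sentinel update id are gone. The surviving context line bumps the update id with `saturating_add(1)`; presumably that also keeps a monitor still carrying the old sentinel from overflowing. A trivial sketch of that behavior:

    // Legacy closed-channel updates used u64::MAX as a sentinel update_id (see the removed
    // branch above), so a plain `+ 1` would panic in debug builds or wrap in release.
    // Assumption: that is part of why the id is bumped with saturating_add here.
    fn next_update_id(latest: u64) -> u64 {
        latest.saturating_add(1)
    }

    fn main() {
        assert_eq!(next_update_id(41), 42);
        assert_eq!(next_update_id(u64::MAX), u64::MAX); // the sentinel stays put
    }
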
@@ -14385,7 +14351,7 @@ where
 							// downstream chan is closed (because we don't have a
 							// channel_id -> peer map entry).
 							counterparty_opt.is_none(),
-							counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
+							Some(monitor.get_counterparty_node_id()),
 							monitor.get_funding_txo(), monitor.channel_id()))
 						} else { None }
 					} else {
@@ -15070,8 +15036,8 @@ mod tests {
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

 		create_announced_chan_between_nodes(&nodes, 0, 1);
-
-		// Since we do not send peer storage, we manually simulate receiving a dummy
+
+		// Since we do not send peer storage, we manually simulate receiving a dummy
 		// `PeerStorage` from the channel partner.
 		nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msgs::PeerStorage{data: vec![0; 100]});
