@@ -7686,24 +7686,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
76867686 (htlc_forwards, decode_update_add_htlcs)
76877687 }
76887688
7689- fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
7689+ fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: &PublicKey) {
76907690 debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
76917691
7692- let counterparty_node_id = match counterparty_node_id {
7693- Some(cp_id) => cp_id.clone(),
7694- None => {
7695- // TODO: Once we can rely on the counterparty_node_id from the
7696- // monitor event, this and the outpoint_to_peer map should be removed.
7697- let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
7698- match outpoint_to_peer.get(funding_txo) {
7699- Some(cp_id) => cp_id.clone(),
7700- None => return,
7701- }
7702- }
7703- };
77047692 let per_peer_state = self.per_peer_state.read().unwrap();
77057693 let mut peer_state_lock;
7706- let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
7694+ let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
77077695 if peer_state_mutex_opt.is_none() { return }
77087696 peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
77097697 let peer_state = &mut *peer_state_lock;
@@ -7714,7 +7702,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
77147702 pending.len()
77157703 } else { 0 };
77167704
7717- let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*channel_id), None);
7705+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
77187706 log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
77197707 highest_applied_update_id, remaining_in_flight);
77207708
@@ -9466,67 +9454,56 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
94669454 for monitor_event in monitor_events.drain(..) {
94679455 match monitor_event {
94689456 MonitorEvent::HTLCEvent(htlc_update) => {
9469- let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
9457+ let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), Some(htlc_update.payment_hash));
94709458 if let Some(preimage) = htlc_update.payment_preimage {
94719459 log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
94729460 self.claim_funds_internal(htlc_update.source, preimage,
94739461 htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
9474- false, counterparty_node_id, funding_outpoint, channel_id, None);
9462+ false, Some(counterparty_node_id), funding_outpoint, channel_id, None);
94759463 } else {
94769464 log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
9477- let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
9465+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
94789466 let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
94799467 self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
94809468 }
94819469 },
94829470 MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
9483- let counterparty_node_id_opt = match counterparty_node_id {
9484- Some(cp_id) => Some(cp_id),
9485- None => {
9486- // TODO: Once we can rely on the counterparty_node_id from the
9487- // monitor event, this and the outpoint_to_peer map should be removed.
9488- let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
9489- outpoint_to_peer.get(&funding_outpoint).cloned()
9490- }
9491- };
9492- if let Some(counterparty_node_id) = counterparty_node_id_opt {
9493- let per_peer_state = self.per_peer_state.read().unwrap();
9494- if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
9495- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9496- let peer_state = &mut *peer_state_lock;
9497- let pending_msg_events = &mut peer_state.pending_msg_events;
9498- if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
9499- let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
9500- reason
9501- } else {
9502- ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
9503- };
9504- let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
9505- let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
9506- failed_channels.push(shutdown_res);
9507- if let Some(funded_chan) = chan.as_funded() {
9508- if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
9509- let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
9510- pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
9511- msg: update
9512- });
9513- }
9514- pending_msg_events.push(events::MessageSendEvent::HandleError {
9515- node_id: funded_chan.context.get_counterparty_node_id(),
9516- action: msgs::ErrorAction::DisconnectPeer {
9517- msg: Some(msgs::ErrorMessage {
9518- channel_id: funded_chan.context.channel_id(),
9519- data: reason.to_string()
9520- })
9521- },
9471+ let per_peer_state = self.per_peer_state.read().unwrap();
9472+ if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
9473+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9474+ let peer_state = &mut *peer_state_lock;
9475+ let pending_msg_events = &mut peer_state.pending_msg_events;
9476+ if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
9477+ let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
9478+ reason
9479+ } else {
9480+ ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
9481+ };
9482+ let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
9483+ let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
9484+ failed_channels.push(shutdown_res);
9485+ if let Some(funded_chan) = chan.as_funded() {
9486+ if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
9487+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
9488+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
9489+ msg: update
95229490 });
95239491 }
9492+ pending_msg_events.push(events::MessageSendEvent::HandleError {
9493+ node_id: counterparty_node_id,
9494+ action: msgs::ErrorAction::DisconnectPeer {
9495+ msg: Some(msgs::ErrorMessage {
9496+ channel_id,
9497+ data: reason.to_string()
9498+ })
9499+ },
9500+ });
95249501 }
95259502 }
95269503 }
95279504 },
95289505 MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
9529- self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref() );
9506+ self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, &counterparty_node_id);
95309507 },
95319508 }
95329509 }
@@ -13756,26 +13733,26 @@ where
1375613733 for (channel_id, monitor) in args.channel_monitors.iter() {
1375713734 if !channel_id_set.contains(channel_id) {
1375813735 let mut should_queue_fc_update = false;
13759- if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
13760- // If the ChannelMonitor had any updates, we may need to update it further and
13761- // thus track it in `closed_channel_monitor_update_ids`. If the channel never
13762- // had any updates at all, there can't be any HTLCs pending which we need to
13763- // claim.
13764- // Note that a `ChannelMonitor` is created with `update_id` 0 and after we
13765- // provide it with a closure update its `update_id` will be at 1.
13766- if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
13767- should_queue_fc_update = !monitor.no_further_updates_allowed();
13768- let mut latest_update_id = monitor.get_latest_update_id();
13769- if should_queue_fc_update {
13770- latest_update_id += 1;
13771- }
13772- per_peer_state.entry(counterparty_node_id)
13773- .or_insert_with(|| Mutex::new(empty_peer_state()))
13774- .lock().unwrap()
13775- .closed_channel_monitor_update_ids.entry(monitor.channel_id())
13776- .and_modify(|v| *v = cmp::max(latest_update_id, *v))
13777- .or_insert(latest_update_id);
13736+ let counterparty_node_id = monitor.get_counterparty_node_id();
13737+
13738+ // If the ChannelMonitor had any updates, we may need to update it further and
13739+ // thus track it in `closed_channel_monitor_update_ids`. If the channel never
13740+ // had any updates at all, there can't be any HTLCs pending which we need to
13741+ // claim.
13742+ // Note that a `ChannelMonitor` is created with `update_id` 0 and after we
13743+ // provide it with a closure update its `update_id` will be at 1.
13744+ if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
13745+ should_queue_fc_update = !monitor.no_further_updates_allowed();
13746+ let mut latest_update_id = monitor.get_latest_update_id();
13747+ if should_queue_fc_update {
13748+ latest_update_id += 1;
1377813749 }
13750+ per_peer_state.entry(counterparty_node_id)
13751+ .or_insert_with(|| Mutex::new(empty_peer_state()))
13752+ .lock().unwrap()
13753+ .closed_channel_monitor_update_ids.entry(monitor.channel_id())
13754+ .and_modify(|v| *v = cmp::max(latest_update_id, *v))
13755+ .or_insert(latest_update_id);
1377913756 }
1378013757
1378113758 if !should_queue_fc_update {
@@ -13786,31 +13763,20 @@ where
1378613763 let channel_id = monitor.channel_id();
1378713764 log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
1378813765 &channel_id);
13789- let mut monitor_update = ChannelMonitorUpdate {
13766+ let monitor_update = ChannelMonitorUpdate {
1379013767 update_id: monitor.get_latest_update_id().saturating_add(1),
13791- counterparty_node_id: None,
13768+ counterparty_node_id: Some(counterparty_node_id),
1379213769 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
1379313770 channel_id: Some(monitor.channel_id()),
1379413771 };
1379513772 let funding_txo = monitor.get_funding_txo();
13796- if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
13797- let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13798- counterparty_node_id,
13799- funding_txo,
13800- channel_id,
13801- update: monitor_update,
13802- };
13803- close_background_events.push(update);
13804- } else {
13805- // This is a fairly old `ChannelMonitor` that hasn't seen an update to its
13806- // off-chain state since LDK 0.0.118 (as in LDK 0.0.119 any off-chain
13807- // `ChannelMonitorUpdate` will set the counterparty ID).
13808- // Thus, we assume that it has no pending HTLCs and we will not need to
13809- // generate a `ChannelMonitorUpdate` for it aside from this
13810- // `ChannelForceClosed` one.
13811- monitor_update.update_id = u64::MAX;
13812- close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, channel_id, monitor_update)));
13813- }
13773+ let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13774+ counterparty_node_id,
13775+ funding_txo,
13776+ channel_id,
13777+ update: monitor_update,
13778+ };
13779+ close_background_events.push(update);
1381413780 }
1381513781 }
1381613782
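As a side note on the update-id bookkeeping described in the comments in this hunk (a fresh `ChannelMonitor` sits at `update_id` 0, a closure update moves it to 1, and a force-close update that is still to be queued bumps the tracked id by one), here is a small self-contained sketch of that rule; `tracked_update_id` and its parameters are hypothetical names standing in for the monitor accessors used above.

```rust
// Sketch of the closed-channel update-id tracking rule from the comments above.
// Inputs mirror monitor.no_further_updates_allowed() and monitor.get_latest_update_id().
fn tracked_update_id(no_further_updates_allowed: bool, latest_update_id: u64) -> Option<u64> {
    // A monitor that never saw more than its closure update (id <= 1) and can no
    // longer be updated has no pending HTLCs to claim, so it needs no tracking.
    if !no_further_updates_allowed || latest_update_id > 1 {
        // If a ChannelForceClosed update is still going to be queued, account for it.
        let should_queue_fc_update = !no_further_updates_allowed;
        let tracked = if should_queue_fc_update {
            latest_update_id.saturating_add(1)
        } else {
            latest_update_id
        };
        Some(tracked)
    } else {
        None
    }
}

fn main() {
    // Still-updatable monitor at id 3: a force-close update will be queued, so track id 4.
    assert_eq!(tracked_update_id(false, 3), Some(4));
    // Frozen monitor whose only update was the closure one: nothing to track.
    assert_eq!(tracked_update_id(true, 1), None);
    // Frozen monitor with later updates: track its current id as-is.
    assert_eq!(tracked_update_id(true, 5), Some(5));
}
```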
@@ -14369,7 +14335,7 @@ where
1436914335 // downstream chan is closed (because we don't have a
1437014336 // channel_id -> peer map entry).
1437114337 counterparty_opt.is_none(),
14372- counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
14338+ Some(monitor.get_counterparty_node_id()),
1437314339 monitor.get_funding_txo(), monitor.channel_id()))
1437414340 } else { None }
1437514341 } else {
@@ -15055,8 +15021,8 @@ mod tests {
1505515021 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1505615022
1505715023 create_announced_chan_between_nodes(&nodes, 0, 1);
15058-
15059- // Since we do not send peer storage, we manually simulate receiving a dummy
15024+
15025+ // Since we do not send peer storage, we manually simulate receiving a dummy
1506015026 // `PeerStorage` from the channel partner.
1506115027 nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msgs::PeerStorage{data: vec![0; 100]});
1506215028