@@ -7687,24 +7687,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
76877687 (htlc_forwards, decode_update_add_htlcs)
76887688 }
76897689
7690-	fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
7690+ fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: &PublicKey) {
76917691 debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
76927692
7693- let counterparty_node_id = match counterparty_node_id {
7694- Some(cp_id) => cp_id.clone(),
7695- None => {
7696- // TODO: Once we can rely on the counterparty_node_id from the
7697- // monitor event, this and the outpoint_to_peer map should be removed.
7698- let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
7699- match outpoint_to_peer.get(funding_txo) {
7700- Some(cp_id) => cp_id.clone(),
7701- None => return,
7702- }
7703- }
7704- };
77057693 let per_peer_state = self.per_peer_state.read().unwrap();
77067694 let mut peer_state_lock;
7707-		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
7695+ let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
77087696 if peer_state_mutex_opt.is_none() { return }
77097697 peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
77107698 let peer_state = &mut *peer_state_lock;
@@ -7715,7 +7703,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
77157703 pending.len()
77167704 } else { 0 };
77177705
7718- let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*channel_id), None);
7706+		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
77197707 log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
77207708 highest_applied_update_id, remaining_in_flight);
77217709
@@ -9467,67 +9455,56 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
94679455 for monitor_event in monitor_events.drain(..) {
94689456 match monitor_event {
94699457 MonitorEvent::HTLCEvent(htlc_update) => {
9470- let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
9458+					let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), Some(htlc_update.payment_hash));
94719459 if let Some(preimage) = htlc_update.payment_preimage {
94729460 log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
94739461 self.claim_funds_internal(htlc_update.source, preimage,
94749462 htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
9475- false, counterparty_node_id, funding_outpoint, channel_id, None);
9463+							false, Some(counterparty_node_id), funding_outpoint, channel_id, None);
94769464 } else {
94779465 log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
9478- let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
9466+						let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
94799467 let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
94809468 self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
94819469 }
94829470 },
94839471 MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
9484- let counterparty_node_id_opt = match counterparty_node_id {
9485- Some(cp_id) => Some(cp_id),
9486- None => {
9487- // TODO: Once we can rely on the counterparty_node_id from the
9488- // monitor event, this and the outpoint_to_peer map should be removed.
9489- let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
9490- outpoint_to_peer.get(&funding_outpoint).cloned()
9491- }
9492- };
9493- if let Some(counterparty_node_id) = counterparty_node_id_opt {
9494- let per_peer_state = self.per_peer_state.read().unwrap();
9495- if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
9496- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9497- let peer_state = &mut *peer_state_lock;
9498- let pending_msg_events = &mut peer_state.pending_msg_events;
9499- if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
9500- let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
9501- reason
9502- } else {
9503- ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
9504- };
9505- let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
9506- let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
9507- failed_channels.push(shutdown_res);
9508- if let Some(funded_chan) = chan.as_funded() {
9509- if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
9510- let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
9511- pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate {
9512- msg: update
9513- });
9514- }
9515- pending_msg_events.push(MessageSendEvent::HandleError {
9516- node_id: funded_chan.context.get_counterparty_node_id(),
9517- action: msgs::ErrorAction::DisconnectPeer {
9518- msg: Some(msgs::ErrorMessage {
9519- channel_id: funded_chan.context.channel_id(),
9520- data: reason.to_string()
9521- })
9522- },
9472+ let per_peer_state = self.per_peer_state.read().unwrap();
9473+ if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
9474+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9475+ let peer_state = &mut *peer_state_lock;
9476+ let pending_msg_events = &mut peer_state.pending_msg_events;
9477+ if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
9478+ let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
9479+ reason
9480+ } else {
9481+ ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
9482+ };
9483+ let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
9484+ let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
9485+ failed_channels.push(shutdown_res);
9486+ if let Some(funded_chan) = chan.as_funded() {
9487+ if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
9488+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
9489+ pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate {
9490+ msg: update
95239491 });
95249492 }
9493+ pending_msg_events.push(MessageSendEvent::HandleError {
9494+ node_id: counterparty_node_id,
9495+ action: msgs::ErrorAction::DisconnectPeer {
9496+ msg: Some(msgs::ErrorMessage {
9497+ channel_id: funded_chan.context.channel_id(),
9498+ data: reason.to_string()
9499+ })
9500+ },
9501+ });
95259502 }
95269503 }
95279504 }
95289505 },
95299506 MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
9530-					self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
9507+					self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, &counterparty_node_id);
95319508 },
95329509 }
95339510 }
@@ -13757,26 +13734,26 @@ where
1375713734 for (channel_id, monitor) in args.channel_monitors.iter() {
1375813735 if !channel_id_set.contains(channel_id) {
1375913736 let mut should_queue_fc_update = false;
13760- if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
13761- // If the ChannelMonitor had any updates, we may need to update it further and
13762- // thus track it in `closed_channel_monitor_update_ids`. If the channel never
13763- // had any updates at all, there can't be any HTLCs pending which we need to
13764- // claim.
13765- // Note that a `ChannelMonitor` is created with `update_id` 0 and after we
13766- // provide it with a closure update its `update_id` will be at 1.
13767- if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
13768- should_queue_fc_update = !monitor.no_further_updates_allowed();
13769- let mut latest_update_id = monitor.get_latest_update_id();
13770- if should_queue_fc_update {
13771- latest_update_id += 1;
13772- }
13773- per_peer_state.entry(counterparty_node_id)
13774- .or_insert_with(|| Mutex::new(empty_peer_state()))
13775- .lock().unwrap()
13776- .closed_channel_monitor_update_ids.entry(monitor.channel_id())
13777- .and_modify(|v| *v = cmp::max(latest_update_id, *v))
13778- .or_insert(latest_update_id);
13737+ let counterparty_node_id = monitor.get_counterparty_node_id();
13738+
13739+ // If the ChannelMonitor had any updates, we may need to update it further and
13740+ // thus track it in `closed_channel_monitor_update_ids`. If the channel never
13741+ // had any updates at all, there can't be any HTLCs pending which we need to
13742+ // claim.
13743+ // Note that a `ChannelMonitor` is created with `update_id` 0 and after we
13744+ // provide it with a closure update its `update_id` will be at 1.
13745+ if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
13746+ should_queue_fc_update = !monitor.no_further_updates_allowed();
13747+ let mut latest_update_id = monitor.get_latest_update_id();
13748+ if should_queue_fc_update {
13749+ latest_update_id += 1;
1377913750 }
13751+ per_peer_state.entry(counterparty_node_id)
13752+ .or_insert_with(|| Mutex::new(empty_peer_state()))
13753+ .lock().unwrap()
13754+ .closed_channel_monitor_update_ids.entry(monitor.channel_id())
13755+ .and_modify(|v| *v = cmp::max(latest_update_id, *v))
13756+ .or_insert(latest_update_id);
1378013757 }
1378113758
1378213759 if !should_queue_fc_update {
@@ -13787,31 +13764,20 @@ where
1378713764 let channel_id = monitor.channel_id();
1378813765 log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
1378913766 &channel_id);
13790- let mut monitor_update = ChannelMonitorUpdate {
13767+ let monitor_update = ChannelMonitorUpdate {
1379113768 update_id: monitor.get_latest_update_id().saturating_add(1),
13792-				counterparty_node_id: None,
13769+				counterparty_node_id: Some(counterparty_node_id),
1379313770 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
1379413771 channel_id: Some(monitor.channel_id()),
1379513772 };
1379613773 let funding_txo = monitor.get_funding_txo();
13797- if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
13798- let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13799- counterparty_node_id,
13800- funding_txo,
13801- channel_id,
13802- update: monitor_update,
13803- };
13804- close_background_events.push(update);
13805- } else {
13806- // This is a fairly old `ChannelMonitor` that hasn't seen an update to its
13807- // off-chain state since LDK 0.0.118 (as in LDK 0.0.119 any off-chain
13808- // `ChannelMonitorUpdate` will set the counterparty ID).
13809- // Thus, we assume that it has no pending HTLCs and we will not need to
13810- // generate a `ChannelMonitorUpdate` for it aside from this
13811- // `ChannelForceClosed` one.
13812- monitor_update.update_id = u64::MAX;
13813- close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, channel_id, monitor_update)));
13814- }
13774+ let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13775+ counterparty_node_id,
13776+ funding_txo,
13777+ channel_id,
13778+ update: monitor_update,
13779+ };
13780+ close_background_events.push(update);
1381513781 }
1381613782 }
1381713783
@@ -14370,7 +14336,7 @@ where
1437014336 // downstream chan is closed (because we don't have a
1437114337 // channel_id -> peer map entry).
1437214338 counterparty_opt.is_none(),
14373-							counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
14339+							Some(monitor.get_counterparty_node_id()),
1437414340 monitor.get_funding_txo(), monitor.channel_id()))
1437514341 } else { None }
1437614342 } else {
@@ -15055,8 +15021,8 @@ mod tests {
1505515021 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1505615022
1505715023 create_announced_chan_between_nodes(&nodes, 0, 1);
15058-
15059- // Since we do not send peer storage, we manually simulate receiving a dummy
15024+
15025+ // Since we do not send peer storage, we manually simulate receiving a dummy
1506015026 // `PeerStorage` from the channel partner.
1506115027 nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msgs::PeerStorage{data: vec![0; 100]});
1506215028
0 commit comments