@@ -7690,24 +7690,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
76907690 (htlc_forwards, decode_update_add_htlcs)
76917691 }
76927692
7693- fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
7693+ fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: &PublicKey) {
76947694 debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
76957695
7696- let counterparty_node_id = match counterparty_node_id {
7697- Some(cp_id) => cp_id.clone(),
7698- None => {
7699- // TODO: Once we can rely on the counterparty_node_id from the
7700- // monitor event, this and the outpoint_to_peer map should be removed.
7701- let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
7702- match outpoint_to_peer.get(funding_txo) {
7703- Some(cp_id) => cp_id.clone(),
7704- None => return,
7705- }
7706- }
7707- };
77087696 let per_peer_state = self.per_peer_state.read().unwrap();
77097697 let mut peer_state_lock;
7710- let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
7698+ let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
77117699 if peer_state_mutex_opt.is_none() { return }
77127700 peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
77137701 let peer_state = &mut *peer_state_lock;
@@ -7718,7 +7706,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
77187706 pending.len()
77197707 } else { 0 };
77207708
7721- let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*channel_id), None);
7709+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
77227710 log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
77237711 highest_applied_update_id, remaining_in_flight);
77247712
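With the counterparty node id now always available, `channel_monitor_updated` can take `&PublicKey` directly and drop the `outpoint_to_peer` fallback along with its silent early return. Below is a minimal sketch of that narrowed shape, using hypothetical `PeerId`/`PeerState`/`Manager` stand-ins rather than LDK's real types:

```rust
use std::collections::HashMap;

// Hypothetical stand-ins (not LDK's real types), used only to illustrate the
// narrowed signature in this hunk.
type PeerId = [u8; 33];

struct PeerState { in_flight_updates: usize }

struct Manager { per_peer_state: HashMap<PeerId, PeerState> }

impl Manager {
    // The counterparty id is now required, so the Option handling and the
    // outpoint_to_peer fallback (with its silent early return) are gone.
    fn channel_monitor_updated(&mut self, counterparty_node_id: &PeerId) {
        if let Some(peer_state) = self.per_peer_state.get_mut(counterparty_node_id) {
            peer_state.in_flight_updates = peer_state.in_flight_updates.saturating_sub(1);
        }
    }
}

fn main() {
    let mut mgr = Manager { per_peer_state: HashMap::new() };
    let peer: PeerId = [2u8; 33];
    mgr.per_peer_state.insert(peer, PeerState { in_flight_updates: 1 });
    mgr.channel_monitor_updated(&peer);
    assert_eq!(mgr.per_peer_state[&peer].in_flight_updates, 0);
}
```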
@@ -9470,67 +9458,56 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
94709458 for monitor_event in monitor_events.drain(..) {
94719459 match monitor_event {
94729460 MonitorEvent::HTLCEvent(htlc_update) => {
9473- let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
9461+ let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), Some(htlc_update.payment_hash));
94749462 if let Some(preimage) = htlc_update.payment_preimage {
94759463 log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
94769464 self.claim_funds_internal(htlc_update.source, preimage,
94779465 htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
9478- false, counterparty_node_id, funding_outpoint, channel_id, None);
9466+ false, Some(counterparty_node_id), funding_outpoint, channel_id, None);
94799467 } else {
94809468 log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
9481- let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
9469+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
94829470 let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
94839471 self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
94849472 }
94859473 },
94869474 MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
9487- let counterparty_node_id_opt = match counterparty_node_id {
9488- Some(cp_id) => Some(cp_id),
9489- None => {
9490- // TODO: Once we can rely on the counterparty_node_id from the
9491- // monitor event, this and the outpoint_to_peer map should be removed.
9492- let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
9493- outpoint_to_peer.get(&funding_outpoint).cloned()
9494- }
9495- };
9496- if let Some(counterparty_node_id) = counterparty_node_id_opt {
9497- let per_peer_state = self.per_peer_state.read().unwrap();
9498- if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
9499- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9500- let peer_state = &mut *peer_state_lock;
9501- let pending_msg_events = &mut peer_state.pending_msg_events;
9502- if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
9503- let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
9504- reason
9505- } else {
9506- ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
9507- };
9508- let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
9509- let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
9510- failed_channels.push(shutdown_res);
9511- if let Some(funded_chan) = chan.as_funded() {
9512- if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
9513- let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
9514- pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
9515- msg: update
9516- });
9517- }
9518- pending_msg_events.push(events::MessageSendEvent::HandleError {
9519- node_id: funded_chan.context.get_counterparty_node_id(),
9520- action: msgs::ErrorAction::DisconnectPeer {
9521- msg: Some(msgs::ErrorMessage {
9522- channel_id: funded_chan.context.channel_id(),
9523- data: reason.to_string()
9524- })
9525- },
9475+ let per_peer_state = self.per_peer_state.read().unwrap();
9476+ if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
9477+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9478+ let peer_state = &mut *peer_state_lock;
9479+ let pending_msg_events = &mut peer_state.pending_msg_events;
9480+ if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
9481+ let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
9482+ reason
9483+ } else {
9484+ ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
9485+ };
9486+ let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
9487+ let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
9488+ failed_channels.push(shutdown_res);
9489+ if let Some(funded_chan) = chan.as_funded() {
9490+ if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
9491+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
9492+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
9493+ msg: update
95269494 });
95279495 }
9496+ pending_msg_events.push(events::MessageSendEvent::HandleError {
9497+ node_id: counterparty_node_id,
9498+ action: msgs::ErrorAction::DisconnectPeer {
9499+ msg: Some(msgs::ErrorMessage {
9500+ channel_id,
9501+ data: reason.to_string()
9502+ })
9503+ },
9504+ });
95289505 }
95299506 }
95309507 }
95319508 },
95329509 MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
9533- self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
9510+ self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, &counterparty_node_id);
95349511 },
95359512 }
95369513 }
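The same simplification applies in the monitor-event loop: the force-close and `Completed` arms no longer need to resolve the peer through `outpoint_to_peer`. A rough sketch of the before/after lookup, again with hypothetical stand-in types rather than LDK's real ones:

```rust
use std::collections::HashMap;

// Hypothetical stand-ins, not LDK's real types: a minimal sketch of the
// fallback lookup that this hunk removes from the monitor-event loop.
type PeerId = [u8; 33];
type OutPoint = ([u8; 32], u16);

// Before: events carried Option<PeerId>; a miss in both the event and the
// outpoint map meant the event was silently dropped.
fn resolve_peer_old(
    event_peer: Option<PeerId>,
    funding_outpoint: &OutPoint,
    outpoint_to_peer: &HashMap<OutPoint, PeerId>,
) -> Option<PeerId> {
    event_peer.or_else(|| outpoint_to_peer.get(funding_outpoint).copied())
}

// After: the id always arrives with the event, so handlers index
// per_peer_state directly and the "peer unknown" branch disappears.
fn resolve_peer_new(event_peer: PeerId) -> PeerId {
    event_peer
}

fn main() {
    let peer: PeerId = [2u8; 33];
    let outpoint: OutPoint = ([0u8; 32], 0);
    let map: HashMap<OutPoint, PeerId> = HashMap::new();
    assert_eq!(resolve_peer_old(Some(peer), &outpoint, &map), Some(peer));
    assert_eq!(resolve_peer_new(peer), peer);
}
```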
@@ -13760,26 +13737,26 @@ where
1376013737 for (channel_id, monitor) in args.channel_monitors.iter() {
1376113738 if !channel_id_set.contains(channel_id) {
1376213739 let mut should_queue_fc_update = false;
13763- if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
13764- // If the ChannelMonitor had any updates, we may need to update it further and
13765- // thus track it in `closed_channel_monitor_update_ids`. If the channel never
13766- // had any updates at all, there can't be any HTLCs pending which we need to
13767- // claim.
13768- // Note that a `ChannelMonitor` is created with `update_id` 0 and after we
13769- // provide it with a closure update its `update_id` will be at 1.
13770- if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
13771- should_queue_fc_update = !monitor.no_further_updates_allowed();
13772- let mut latest_update_id = monitor.get_latest_update_id();
13773- if should_queue_fc_update {
13774- latest_update_id += 1;
13775- }
13776- per_peer_state.entry(counterparty_node_id)
13777- .or_insert_with(|| Mutex::new(empty_peer_state()))
13778- .lock().unwrap()
13779- .closed_channel_monitor_update_ids.entry(monitor.channel_id())
13780- .and_modify(|v| *v = cmp::max(latest_update_id, *v))
13781- .or_insert(latest_update_id);
13740+ let counterparty_node_id = monitor.get_counterparty_node_id();
13741+
13742+ // If the ChannelMonitor had any updates, we may need to update it further and
13743+ // thus track it in `closed_channel_monitor_update_ids`. If the channel never
13744+ // had any updates at all, there can't be any HTLCs pending which we need to
13745+ // claim.
13746+ // Note that a `ChannelMonitor` is created with `update_id` 0 and after we
13747+ // provide it with a closure update its `update_id` will be at 1.
13748+ if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
13749+ should_queue_fc_update = !monitor.no_further_updates_allowed();
13750+ let mut latest_update_id = monitor.get_latest_update_id();
13751+ if should_queue_fc_update {
13752+ latest_update_id += 1;
1378213753 }
13754+ per_peer_state.entry(counterparty_node_id)
13755+ .or_insert_with(|| Mutex::new(empty_peer_state()))
13756+ .lock().unwrap()
13757+ .closed_channel_monitor_update_ids.entry(monitor.channel_id())
13758+ .and_modify(|v| *v = cmp::max(latest_update_id, *v))
13759+ .or_insert(latest_update_id);
1378313760 }
1378413761
1378513762 if !should_queue_fc_update {
@@ -13790,31 +13767,20 @@ where
1379013767 let channel_id = monitor.channel_id();
1379113768 log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
1379213769 &channel_id);
13793- let mut monitor_update = ChannelMonitorUpdate {
13770+ let monitor_update = ChannelMonitorUpdate {
1379413771 update_id: monitor.get_latest_update_id().saturating_add(1),
13795- counterparty_node_id: None,
13772+ counterparty_node_id: Some(counterparty_node_id),
1379613773 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
1379713774 channel_id: Some(monitor.channel_id()),
1379813775 };
1379913776 let funding_txo = monitor.get_funding_txo();
13800- if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
13801- let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13802- counterparty_node_id,
13803- funding_txo,
13804- channel_id,
13805- update: monitor_update,
13806- };
13807- close_background_events.push(update);
13808- } else {
13809- // This is a fairly old `ChannelMonitor` that hasn't seen an update to its
13810- // off-chain state since LDK 0.0.118 (as in LDK 0.0.119 any off-chain
13811- // `ChannelMonitorUpdate` will set the counterparty ID).
13812- // Thus, we assume that it has no pending HTLCs and we will not need to
13813- // generate a `ChannelMonitorUpdate` for it aside from this
13814- // `ChannelForceClosed` one.
13815- monitor_update.update_id = u64::MAX;
13816- close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, channel_id, monitor_update)));
13817- }
13777+ let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13778+ counterparty_node_id,
13779+ funding_txo,
13780+ channel_id,
13781+ update: monitor_update,
13782+ };
13783+ close_background_events.push(update);
1381813784 }
1381913785 }
1382013786
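The deserialization path keeps the update-id bookkeeping described in the comments above; only the `Option` handling around `get_counterparty_node_id()` goes away. A small, self-contained sketch of that bookkeeping, using a hypothetical `MonitorStub` and `ChannelId` alias rather than LDK's actual structs:

```rust
use std::cmp;
use std::collections::HashMap;

// Hypothetical stand-ins, not LDK's actual types.
type ChannelId = [u8; 32];

struct MonitorStub {
    channel_id: ChannelId,
    latest_update_id: u64,
    no_further_updates_allowed: bool,
}

fn track_closed_monitor(
    monitor: &MonitorStub,
    closed_channel_monitor_update_ids: &mut HashMap<ChannelId, u64>,
) {
    // A monitor starts at update_id 0 and reaches 1 once a closure update is
    // applied, so anything above 1 (or a monitor that still accepts updates)
    // may still have HTLCs we need to be able to claim.
    if !monitor.no_further_updates_allowed || monitor.latest_update_id > 1 {
        let mut latest_update_id = monitor.latest_update_id;
        if !monitor.no_further_updates_allowed {
            // Reserve the next id for the ChannelForceClosed update we are
            // about to queue ourselves.
            latest_update_id += 1;
        }
        closed_channel_monitor_update_ids
            .entry(monitor.channel_id)
            .and_modify(|v| *v = cmp::max(latest_update_id, *v))
            .or_insert(latest_update_id);
    }
}

fn main() {
    let monitor = MonitorStub {
        channel_id: [0u8; 32],
        latest_update_id: 3,
        no_further_updates_allowed: false,
    };
    let mut ids = HashMap::new();
    track_closed_monitor(&monitor, &mut ids);
    assert_eq!(ids[&monitor.channel_id], 4);
}
```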
@@ -14373,7 +14339,7 @@ where
1437314339 // downstream chan is closed (because we don't have a
1437414340 // channel_id -> peer map entry).
1437514341 counterparty_opt.is_none(),
14376- counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
14342+ Some(monitor.get_counterparty_node_id()),
1437714343 monitor.get_funding_txo(), monitor.channel_id()))
1437814344 } else { None }
1437914345 } else {
@@ -15059,8 +15025,8 @@ mod tests {
1505915025 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1506015026
1506115027 create_announced_chan_between_nodes(&nodes, 0, 1);
15062-
15063- // Since we do not send peer storage, we manually simulate receiving a dummy
15028+
15029+ // Since we do not send peer storage, we manually simulate receiving a dummy
1506415030 // `PeerStorage` from the channel partner.
1506515031 nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msgs::PeerStorage{data: vec![0; 100]});
1506615032