@@ -1139,7 +1139,7 @@ where
 // |
 // |__`peer_state`
 // |
-// |__`id_to_peer`
+// |__`outpoint_to_peer`
 // |
 // |__`short_to_chan_info`
 // |
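The tree above encodes the order in which `ChannelManager`'s locks may be nested: a lock may only be acquired while holding locks that appear above it. A toy standalone sketch of that discipline, with stand-in `Mutex` fields rather than LDK's real types; taking the two locks in opposite orders on two threads is the classic deadlock this documentation exists to prevent:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Stand-in manager with two locks whose documented order is:
// `peer_state` before `outpoint_to_peer`.
struct Manager {
    peer_state: Mutex<HashMap<u64, String>>,
    outpoint_to_peer: Mutex<HashMap<u64, String>>,
}

impl Manager {
    // Correct: acquire `peer_state` first, then `outpoint_to_peer`,
    // matching the documented tree. Any code path that took them in
    // the reverse order could deadlock against this one.
    fn remove_channel(&self, outpoint: u64) {
        let mut peers = self.peer_state.lock().unwrap();
        let by_outpoint = self.outpoint_to_peer.lock().unwrap();
        if let Some(peer) = by_outpoint.get(&outpoint) {
            peers.remove(&outpoint);
            let _ = peer;
        }
    }
}

fn main() {
    let m = Manager {
        peer_state: Mutex::new(HashMap::new()),
        outpoint_to_peer: Mutex::new(HashMap::new()),
    };
    m.remove_channel(42);
}
```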
@@ -1233,11 +1233,7 @@ where
 	/// See `ChannelManager` struct-level documentation for lock order requirements.
 	outbound_scid_aliases: Mutex<HashSet<u64>>,
 
-	/// `channel_id` -> `counterparty_node_id`.
-	///
-	/// Only `channel_id`s are allowed as keys in this map, and not `temporary_channel_id`s. As
-	/// multiple channels with the same `temporary_channel_id` to different peers can exist,
-	/// allowing `temporary_channel_id`s in this map would cause collisions for such channels.
+	/// Channel funding outpoint -> `counterparty_node_id`.
 	///
 	/// Note that this map should only be used for `MonitorEvent` handling, to be able to access
 	/// the corresponding channel for the event, as we only have access to the `channel_id` during
@@ -1255,7 +1251,7 @@ where
 	/// required to access the channel with the `counterparty_node_id`.
 	///
 	/// See `ChannelManager` struct-level documentation for lock order requirements.
-	id_to_peer: Mutex<HashMap<ChannelId, PublicKey>>,
+	outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
 
 	/// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
 	///
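Keying this map by funding `OutPoint` rather than `ChannelId` works because a funding outpoint is unique per funded channel and is what monitor updates carry, and it sidesteps the old doc's caveat about `temporary_channel_id` collisions entirely. A rough standalone sketch of the map's shape, using hypothetical stand-ins rather than the real `bitcoin::OutPoint` and `secp256k1::PublicKey`:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Hypothetical stand-ins for bitcoin::OutPoint and a node pubkey.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct OutPoint { txid: [u8; 32], index: u16 }
#[derive(Clone, Copy, Debug)]
struct PublicKey([u8; 33]);

// Funding outpoint -> counterparty node id, behind a Mutex as in the diff.
struct OutpointToPeer {
    map: Mutex<HashMap<OutPoint, PublicKey>>,
}

impl OutpointToPeer {
    // Insert once the funding transaction is created; the outpoint is
    // known at that point, unlike a `temporary_channel_id`, which can
    // collide across peers.
    fn on_funding_created(&self, funding_txo: OutPoint, counterparty: PublicKey) {
        self.map.lock().unwrap().insert(funding_txo, counterparty);
    }

    // Resolve a monitor event's funding outpoint back to the peer.
    fn peer_for(&self, funding_txo: &OutPoint) -> Option<PublicKey> {
        self.map.lock().unwrap().get(funding_txo).copied()
    }
}

fn main() {
    let m = OutpointToPeer { map: Mutex::new(HashMap::new()) };
    let txo = OutPoint { txid: [0u8; 32], index: 0 };
    m.on_funding_created(txo, PublicKey([2u8; 33]));
    assert!(m.peer_for(&txo).is_some());
}
```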
@@ -1995,7 +1991,9 @@ macro_rules! handle_error {
 
 macro_rules! update_maps_on_chan_removal {
 	($self: expr, $channel_context: expr) => {{
-		$self.id_to_peer.lock().unwrap().remove(&$channel_context.channel_id());
+		if let Some(outpoint) = $channel_context.get_funding_txo() {
+			$self.outpoint_to_peer.lock().unwrap().remove(&outpoint);
+		}
 		let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
 		if let Some(short_id) = $channel_context.get_short_channel_id() {
 			short_to_chan_info.remove(&short_id);
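The macro now removes by funding outpoint, guarded by `get_funding_txo()`: a channel that never reached funding was never inserted, so there is nothing to remove. A standalone sketch of the same guard over plain maps, with hypothetical stand-in types:

```rust
use std::collections::HashMap;

// Stand-in channel context: the funding outpoint is None until the
// funding transaction has been created.
struct ChannelContext { funding_txo: Option<u64>, short_channel_id: Option<u64> }

fn update_maps_on_chan_removal(
    outpoint_to_peer: &mut HashMap<u64, [u8; 33]>,
    short_to_chan_info: &mut HashMap<u64, [u8; 33]>,
    ctx: &ChannelContext,
) {
    // Mirrors the diff: the old code removed unconditionally by
    // channel_id; keyed by outpoint, we must first check one exists,
    // since unfunded channels were never inserted.
    if let Some(outpoint) = ctx.funding_txo {
        outpoint_to_peer.remove(&outpoint);
    }
    if let Some(short_id) = ctx.short_channel_id {
        short_to_chan_info.remove(&short_id);
    }
}

fn main() {
    let mut o2p = HashMap::new();
    o2p.insert(7u64, [2u8; 33]);
    let mut s2c = HashMap::new();
    let ctx = ChannelContext { funding_txo: Some(7), short_channel_id: None };
    update_maps_on_chan_removal(&mut o2p, &mut s2c, &ctx);
    assert!(o2p.is_empty());
}
```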
@@ -2414,7 +2412,7 @@ where
 			forward_htlcs: Mutex::new(HashMap::new()),
 			claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }),
 			pending_intercepted_htlcs: Mutex::new(HashMap::new()),
-			id_to_peer: Mutex::new(HashMap::new()),
+			outpoint_to_peer: Mutex::new(HashMap::new()),
 			short_to_chan_info: FairRwLock::new(HashMap::new()),
 
 			our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
@@ -2565,7 +2563,7 @@ where
 	fn list_funded_channels_with_filter<Fn: FnMut(&(&ChannelId, &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
 		// Allocate our best estimate of the number of channels we have in the `res`
 		// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
-		// a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
+		// a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
 		// of the ChannelMonitor handling. Therefore reallocations may still occur, but is
 		// unlikely as the `short_to_chan_info` map often contains 2 entries for
 		// the same channel.
@@ -2598,7 +2596,7 @@ where
 	pub fn list_channels(&self) -> Vec<ChannelDetails> {
 		// Allocate our best estimate of the number of channels we have in the `res`
 		// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
-		// a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
+		// a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
 		// of the ChannelMonitor handling. Therefore reallocations may still occur, but is
 		// unlikely as the `short_to_chan_info` map often contains 2 entries for
 		// the same channel.
@@ -3716,9 +3714,10 @@ where
 
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
+		let funding_txo;
 		let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
 			Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
-				let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+				funding_txo = find_funding_output(&chan, &funding_transaction)?;
 
 				let logger = WithChannelContext::from(&self.logger, &chan.context);
 				let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger)
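Hoisting `let funding_txo;` out of the `match` is deferred initialization: the binding is assigned inside the arm and stays usable after the match (it is needed later for the map insert), with the compiler verifying it is initialized on every path that reaches a use. A standalone illustration of the pattern; `find_funding_output` here is a hypothetical stand-in, not LDK's helper:

```rust
fn find_funding_output(amount: u64) -> Result<u32, &'static str> {
    // Hypothetical stand-in for the diff's find_funding_output.
    if amount > 0 { Ok(0) } else { Err("no output") }
}

fn main() -> Result<(), &'static str> {
    let funding_txo; // deferred initialization, as in the diff
    let state = Some(1_000_000u64);
    let label = match state {
        Some(amount) => {
            // Assign inside the arm; the binding outlives the match.
            funding_txo = find_funding_output(amount)?;
            "funded"
        }
        // The other arm diverges, so `funding_txo` is definitely
        // initialized on every path that reaches the println below.
        None => return Err("unknown channel"),
    };
    println!("{label}: output index {funding_txo}");
    Ok(())
}
```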
@@ -3766,9 +3765,9 @@ where
 				panic!("Generated duplicate funding txid?");
 			},
 			hash_map::Entry::Vacant(e) => {
-				let mut id_to_peer = self.id_to_peer.lock().unwrap();
-				if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
-					panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
+				let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+				if outpoint_to_peer.insert(funding_txo, chan.context.get_counterparty_node_id()).is_some() {
+					panic!("outpoint_to_peer map already contained funding outpoint, which shouldn't be possible");
 				}
 				e.insert(ChannelPhase::UnfundedOutboundV1(chan));
 			}
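`HashMap::insert` returns the previous value for the key, so `.is_some()` doubles as a duplicate-key check. Two channels can never share a funding outpoint, so a hit here would mean our own bookkeeping is broken, hence the panic rather than an error return. In miniature, with stand-in key and value types:

```rust
use std::collections::HashMap;

fn main() {
    let mut outpoint_to_peer: HashMap<u64, [u8; 33]> = HashMap::new();
    let funding_txo = 7u64;
    let counterparty = [2u8; 33];

    // `insert` returns `Some(old_value)` if the key was already present.
    // A duplicate funding outpoint is an internal invariant violation,
    // so panic loudly instead of continuing with corrupt state.
    if outpoint_to_peer.insert(funding_txo, counterparty).is_some() {
        panic!("outpoint_to_peer map already contained funding outpoint, which shouldn't be possible");
    }
}
```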
@@ -5851,9 +5850,9 @@ where
 			Some(cp_id) => cp_id.clone(),
 			None => {
 				// TODO: Once we can rely on the counterparty_node_id from the
-				// monitor event, this and the id_to_peer map should be removed.
-				let id_to_peer = self.id_to_peer.lock().unwrap();
-				match id_to_peer.get(&funding_txo.to_channel_id()) {
+				// monitor event, this and the outpoint_to_peer map should be removed.
+				let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+				match outpoint_to_peer.get(&funding_txo) {
 					Some(cp_id) => cp_id.clone(),
 					None => return,
 				}
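This is the fallback for monitor events that predate carrying a `counterparty_node_id`: prefer the id on the event itself, fall back to the map keyed by funding outpoint, and silently drop the event if neither source knows the peer. A compact sketch of that resolution order, using hypothetical stand-in types:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

type NodeId = [u8; 33];

struct MonitorEvent {
    funding_txo: u64,
    // Older serialized events may not carry the counterparty.
    counterparty_node_id: Option<NodeId>,
}

fn handle_event(event: &MonitorEvent, outpoint_to_peer: &Mutex<HashMap<u64, NodeId>>) {
    let counterparty_node_id = match event.counterparty_node_id {
        Some(cp_id) => cp_id,
        None => {
            // Fallback: resolve via the outpoint_to_peer map; if even
            // that fails, there is no peer to hand the event to.
            let map = outpoint_to_peer.lock().unwrap();
            match map.get(&event.funding_txo) {
                Some(cp_id) => *cp_id,
                None => return,
            }
        }
    };
    println!("dispatching event to peer {:?}", &counterparty_node_id[..4]);
}

fn main() {
    let map = Mutex::new(HashMap::from([(9u64, [3u8; 33])]));
    handle_event(&MonitorEvent { funding_txo: 9, counterparty_node_id: None }, &map);
}
```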
@@ -6237,8 +6236,8 @@ where
 				))
 			},
 			hash_map::Entry::Vacant(e) => {
-				let mut id_to_peer_lock = self.id_to_peer.lock().unwrap();
-				match id_to_peer_lock.entry(chan.context.channel_id()) {
+				let mut outpoint_to_peer_lock = self.outpoint_to_peer.lock().unwrap();
+				match outpoint_to_peer_lock.entry(monitor.get_funding_txo().0) {
 					hash_map::Entry::Occupied(_) => {
 						return Err(MsgHandleErrInternal::send_err_msg_no_close(
 							"The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
@@ -6248,7 +6247,7 @@ where
 						let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
 						if let Ok(persist_state) = monitor_res {
 							i_e.insert(chan.context.get_counterparty_node_id());
-							mem::drop(id_to_peer_lock);
+							mem::drop(outpoint_to_peer_lock);
 
 							// There's no problem signing a counterparty's funding transaction if our monitor
 							// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
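Two details worth noting in this path: the `Entry` API turns the "reject a duplicate funding outpoint" check and the insertion into one logical step, and the explicit `mem::drop` releases the map lock as soon as the insert is done, rather than holding it to end of scope through the rest of the funding handling. A reduced sketch of both, under stand-in types:

```rust
use std::collections::{hash_map, HashMap};
use std::mem;
use std::sync::Mutex;

fn accept_funding(
    outpoint_to_peer: &Mutex<HashMap<u64, [u8; 33]>>,
    funding_txo: u64,
    counterparty: [u8; 33],
) -> Result<(), &'static str> {
    let mut outpoint_to_peer_lock = outpoint_to_peer.lock().unwrap();
    match outpoint_to_peer_lock.entry(funding_txo) {
        // An existing entry means a channel with this funding txid
        // already exists - refuse, as in the diff's error path.
        hash_map::Entry::Occupied(_) => Err("duplicate funding_txid"),
        hash_map::Entry::Vacant(i_e) => {
            i_e.insert(counterparty);
            // Release the lock before doing anything slow (monitor
            // persistence in the real code) so other threads aren't
            // blocked on this map.
            mem::drop(outpoint_to_peer_lock);
            // ... remaining funding handling would happen here ...
            Ok(())
        }
    }
}

fn main() {
    let map = Mutex::new(HashMap::new());
    assert!(accept_funding(&map, 1, [2u8; 33]).is_ok());
    assert!(accept_funding(&map, 1, [2u8; 33]).is_err());
}
```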
@@ -7142,9 +7141,9 @@ where
 				Some(cp_id) => Some(cp_id),
 				None => {
 					// TODO: Once we can rely on the counterparty_node_id from the
-					// monitor event, this and the id_to_peer map should be removed.
-					let id_to_peer = self.id_to_peer.lock().unwrap();
-					id_to_peer.get(&funding_outpoint.to_channel_id()).cloned()
+					// monitor event, this and the outpoint_to_peer map should be removed.
+					let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+					outpoint_to_peer.get(&funding_outpoint).cloned()
 				}
 			};
 			if let Some(counterparty_node_id) = counterparty_node_id_opt {
@@ -10081,7 +10080,7 @@ where
 		let channel_count: u64 = Readable::read(reader)?;
 		let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
-		let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+		let mut outpoint_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut channel_closures = VecDeque::new();
 		let mut close_background_events = Vec::new();
@@ -10159,8 +10158,8 @@ where
 				if let Some(short_channel_id) = channel.context.get_short_channel_id() {
 					short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
 				}
-				if channel.context.is_funding_broadcast() {
-					id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
+				if let Some(funding_txo) = channel.context.get_funding_txo() {
+					outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
 				}
 				match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) {
 					hash_map::Entry::Occupied(mut entry) => {
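On deserialization the map is rebuilt from the channels themselves rather than persisted separately, and the predicate widens from `is_funding_broadcast()` to "has a funding outpoint", so channels whose funding transaction exists but has not yet been broadcast are also indexed. A reduced sketch of the rebuild loop over stand-in channel data:

```rust
use std::cmp;
use std::collections::HashMap;

struct ReadChannel {
    counterparty: [u8; 33],
    // None until the funding transaction exists.
    funding_txo: Option<u64>,
}

fn rebuild_outpoint_to_peer(channels: &[ReadChannel]) -> HashMap<u64, [u8; 33]> {
    // Capacity heuristic mirrors the diff: bounded at 128 rather than
    // trusting an arbitrarily large count read from disk.
    let mut outpoint_to_peer = HashMap::with_capacity(cmp::min(channels.len(), 128));
    for channel in channels {
        // Index any channel with a funding outpoint, broadcast or not -
        // the widened predicate from the diff.
        if let Some(funding_txo) = channel.funding_txo {
            outpoint_to_peer.insert(funding_txo, channel.counterparty);
        }
    }
    outpoint_to_peer
}

fn main() {
    let chans = [
        ReadChannel { counterparty: [2u8; 33], funding_txo: Some(5) },
        ReadChannel { counterparty: [3u8; 33], funding_txo: None },
    ];
    assert_eq!(rebuild_outpoint_to_peer(&chans).len(), 1);
}
```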
@@ -10494,7 +10493,7 @@ where
 		// We only rebuild the pending payments map if we were most recently serialized by
 		// 0.0.102+
 		for (_, monitor) in args.channel_monitors.iter() {
-			let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
+			let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo().0);
 			if counterparty_opt.is_none() {
 				let logger = WithChannelMonitor::from(&args.logger, monitor);
 				for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
@@ -10787,7 +10786,7 @@ where
 							// without the new monitor persisted - we'll end up right back here on
 							// restart.
 							let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
-							if let Some(peer_node_id) = id_to_peer.get(&previous_channel_id) {
+							if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
 								let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
 								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 								let peer_state = &mut *peer_state_lock;
@@ -10865,7 +10864,7 @@ where
 			forward_htlcs: Mutex::new(forward_htlcs),
 			claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
 			outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
-			id_to_peer: Mutex::new(id_to_peer),
+			outpoint_to_peer: Mutex::new(outpoint_to_peer),
 			short_to_chan_info: FairRwLock::new(short_to_chan_info),
 			fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
 
@@ -11482,8 +11481,8 @@ mod tests {
 	}
 
 	#[test]
-	fn test_id_to_peer_coverage() {
-		// Test that the `ChannelManager:id_to_peer` contains channels which have been assigned
+	fn test_outpoint_to_peer_coverage() {
+		// Test that the `ChannelManager::outpoint_to_peer` map contains channels which have been assigned
 		// a `channel_id` (i.e. have had the funding tx created), and that they are removed once
 		// the channel is successfully closed.
 		let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -11497,42 +11496,42 @@ mod tests {
 		let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
 		nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
 
-		let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+		let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
 		let channel_id = ChannelId::from_bytes(tx.txid().to_byte_array());
 		{
-			// Ensure that the `id_to_peer` map is empty until either party has received the
+			// Ensure that the `outpoint_to_peer` map is empty until either party has received the
 			// funding transaction, and have the real `channel_id`.
-			assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
-			assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+			assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
+			assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
 		}
 
 		nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
 		{
-			// Assert that `nodes[0]`'s `id_to_peer` map is populated with the channel as soon as
+			// Assert that `nodes[0]`'s `outpoint_to_peer` map is populated with the channel as soon
 			// as it has the funding transaction.
-			let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+			let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
 			assert_eq!(nodes_0_lock.len(), 1);
-			assert!(nodes_0_lock.contains_key(&channel_id));
+			assert!(nodes_0_lock.contains_key(&funding_output));
 		}
 
-		assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+		assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
 
 		let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
 
 		nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
 		{
-			let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+			let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
 			assert_eq!(nodes_0_lock.len(), 1);
-			assert!(nodes_0_lock.contains_key(&channel_id));
+			assert!(nodes_0_lock.contains_key(&funding_output));
 		}
 		expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
 
 		{
-			// Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as
-			// as it has the funding transaction.
-			let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+			// Assert that `nodes[1]`'s `outpoint_to_peer` map is populated with the channel as
+			// soon as it has the funding transaction.
+			let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
 			assert_eq!(nodes_1_lock.len(), 1);
-			assert!(nodes_1_lock.contains_key(&channel_id));
+			assert!(nodes_1_lock.contains_key(&funding_output));
 		}
 		check_added_monitors!(nodes[1], 1);
 		let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
@@ -11551,48 +11550,48 @@ mod tests {
 		let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
 		nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0);
 		{
-			// Assert that the channel is kept in the `id_to_peer` map for both nodes until the
+			// Assert that the channel is kept in the `outpoint_to_peer` map for both nodes until the
 			// channel can be fully closed by both parties (i.e. no outstanding htlcs exists, the
 			// fee for the closing transaction has been negotiated and the parties has the other
 			// party's signature for the fee negotiated closing transaction.)
-			let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+			let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
 			assert_eq!(nodes_0_lock.len(), 1);
-			assert!(nodes_0_lock.contains_key(&channel_id));
+			assert!(nodes_0_lock.contains_key(&funding_output));
 		}
 
 		{
 			// At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
 			// `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
 			// from `nodes[0]` for the closing transaction with the proposed fee, the channel is
-			// kept in the `nodes[1]`'s `id_to_peer` map.
-			let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+			// kept in the `nodes[1]`'s `outpoint_to_peer` map.
+			let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
 			assert_eq!(nodes_1_lock.len(), 1);
-			assert!(nodes_1_lock.contains_key(&channel_id));
+			assert!(nodes_1_lock.contains_key(&funding_output));
 		}
 
 		nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
 		{
 			// `nodes[0]` accepts `nodes[1]`'s proposed fee for the closing transaction, and
 			// therefore has all it needs to fully close the channel (both signatures for the
 			// closing transaction).
-			// Assert that the channel is removed from `nodes[0]`'s `id_to_peer` map as it can be
+			// Assert that the channel is removed from `nodes[0]`'s `outpoint_to_peer` map as it can be
 			// fully closed by `nodes[0]`.
-			assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
+			assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
 
-			// Assert that the channel is still in `nodes[1]`'s `id_to_peer` map, as `nodes[1]`
+			// Assert that the channel is still in `nodes[1]`'s `outpoint_to_peer` map, as `nodes[1]`
 			// doesn't have `nodes[0]`'s signature for the closing transaction yet.
-			let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+			let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
 			assert_eq!(nodes_1_lock.len(), 1);
-			assert!(nodes_1_lock.contains_key(&channel_id));
+			assert!(nodes_1_lock.contains_key(&funding_output));
 		}
 
 		let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
 
 		nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap());
 		{
-			// Assert that the channel has now been removed from both parties `id_to_peer` map once
+			// Assert that the channel has now been removed from both parties' `outpoint_to_peer` map once
 			// they both have everything required to fully close the channel.
-			assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+			assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
 		}
 		let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
 