diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index cf18fa83afa..4ef9a1c7721 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -43,7 +43,6 @@ use lightning::chain::{ chainmonitor, channelmonitor, BestBlock, ChannelMonitorUpdateStatus, Confirm, Watch, }; use lightning::events; -use lightning::events::MessageSendEventsProvider; use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; use lightning::ln::channel_state::ChannelDetails; use lightning::ln::channelmanager::{ @@ -52,7 +51,10 @@ use lightning::ln::channelmanager::{ }; use lightning::ln::functional_test_utils::*; use lightning::ln::inbound_payment::ExpandedKey; -use lightning::ln::msgs::{ChannelMessageHandler, CommitmentUpdate, Init, UpdateAddHTLC}; +use lightning::ln::msgs::{ + BaseMessageHandler, ChannelMessageHandler, CommitmentUpdate, Init, MessageSendEvent, + UpdateAddHTLC, +}; use lightning::ln::script::ShutdownScript; use lightning::ln::types::ChannelId; use lightning::offers::invoice::UnsignedBolt12Invoice; @@ -768,7 +770,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let open_channel = { let events = $source.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - if let events::MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] { + if let MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] { msg.clone() } else { panic!("Wrong event type"); @@ -804,7 +806,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } let events = $dest.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - if let events::MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] { + if let MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] { msg.clone() } else { panic!("Wrong event type"); @@ -847,7 +849,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let funding_created = { let events = $source.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - if let events::MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] { + if let MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] { msg.clone() } else { panic!("Wrong event type"); @@ -858,7 +860,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let funding_signed = { let events = $dest.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - if let events::MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] { + if let MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] { msg.clone() } else { panic!("Wrong event type"); @@ -913,9 +915,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } for (idx, node_event) in node_events.iter().enumerate() { for event in node_event { - if let events::MessageSendEvent::SendChannelReady { ref node_id, ref msg } = - event - { + if let MessageSendEvent::SendChannelReady { ref node_id, ref msg } = event { for node in $nodes.iter() { if node.get_our_node_id() == *node_id { node.handle_channel_ready($nodes[idx].get_our_node_id(), msg); @@ -930,7 +930,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { for node in $nodes.iter() { let events = node.get_and_clear_pending_msg_events(); for event in events { - if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event { + if let MessageSendEvent::SendAnnouncementSignatures { .. 
} = event { } else { panic!("Wrong event type"); } @@ -1015,25 +1015,25 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let expect_drop_id = if let Some(id) = expect_drop_node { Some(nodes[id].get_our_node_id()) } else { None }; for event in $excess_events { let push_a = match event { - events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => { + MessageSendEvent::UpdateHTLCs { ref node_id, .. } => { if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } *node_id == a_id }, - events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => { + MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => { if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } *node_id == a_id }, - events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => { + MessageSendEvent::SendChannelReestablish { ref node_id, .. } => { if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } *node_id == a_id }, - events::MessageSendEvent::SendStfu { ref node_id, .. } => { + MessageSendEvent::SendStfu { ref node_id, .. } => { if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } *node_id == a_id }, - events::MessageSendEvent::SendChannelReady { .. } => continue, - events::MessageSendEvent::SendAnnouncementSignatures { .. } => continue, - events::MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { + MessageSendEvent::SendChannelReady { .. } => continue, + MessageSendEvent::SendAnnouncementSignatures { .. } => continue, + MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => { assert_eq!(msg.contents.channel_flags & 2, 0); // The disable bit must never be set! if Some(*node_id) == expect_drop_id { panic!("peer_disconnected should drop msgs bound for the disconnected peer"); } *node_id == a_id @@ -1089,7 +1089,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { for event in &mut events_iter { had_events = true; match event { - events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == node_id { for update_add in update_add_htlcs.iter() { @@ -1127,7 +1127,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { !update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty(); if $limit_events != ProcessMessages::AllMessages && processed_change { // If we only want to process some messages, don't deliver the CS until later. 
- extra_ev = Some(events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { + extra_ev = Some(MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { update_add_htlcs: Vec::new(), update_fail_htlcs: Vec::new(), update_fulfill_htlcs: Vec::new(), @@ -1143,7 +1143,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } } }, - events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { out.locked_write(format!("Delivering revoke_and_ack from node {} to node {}.\n", $node, idx).as_bytes()); @@ -1151,7 +1151,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } } }, - events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { + MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { out.locked_write(format!("Delivering channel_reestablish from node {} to node {}.\n", $node, idx).as_bytes()); @@ -1159,7 +1159,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } } }, - events::MessageSendEvent::SendStfu { ref node_id, ref msg } => { + MessageSendEvent::SendStfu { ref node_id, ref msg } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { out.locked_write(format!("Delivering stfu from node {} to node {}.\n", $node, idx).as_bytes()); @@ -1167,13 +1167,13 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } } } - events::MessageSendEvent::SendChannelReady { .. } => { + MessageSendEvent::SendChannelReady { .. } => { // Can be generated as a reestablish response }, - events::MessageSendEvent::SendAnnouncementSignatures { .. } => { + MessageSendEvent::SendAnnouncementSignatures { .. } => { // Can be generated as a reestablish response }, - events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => { + MessageSendEvent::SendChannelUpdate { ref msg, .. } => { // When we reconnect we will resend a channel_update to make sure our // counterparty has the latest parameters for receiving payments // through us. We do, however, check that the message does not include @@ -1216,13 +1216,13 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { if $counterparty_id == 0 { for event in nodes[0].get_and_clear_pending_msg_events() { match event { - events::MessageSendEvent::UpdateHTLCs { .. } => {}, - events::MessageSendEvent::SendRevokeAndACK { .. } => {}, - events::MessageSendEvent::SendChannelReestablish { .. } => {}, - events::MessageSendEvent::SendStfu { .. } => {}, - events::MessageSendEvent::SendChannelReady { .. } => {}, - events::MessageSendEvent::SendAnnouncementSignatures { .. } => {}, - events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => { + MessageSendEvent::UpdateHTLCs { .. } => {}, + MessageSendEvent::SendRevokeAndACK { .. } => {}, + MessageSendEvent::SendChannelReestablish { .. } => {}, + MessageSendEvent::SendStfu { .. } => {}, + MessageSendEvent::SendChannelReady { .. } => {}, + MessageSendEvent::SendAnnouncementSignatures { .. } => {}, + MessageSendEvent::SendChannelUpdate { ref msg, .. } => { assert_eq!(msg.contents.channel_flags & 2, 0); // The disable bit must never be set! 
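The assertion `msg.contents.channel_flags & 2 == 0` recurs throughout this harness. Per BOLT 7, bit 0 of a `channel_update`'s `channel_flags` encodes which direction the update applies to and bit 1 marks the channel as disabled; the fuzzer insists the disable bit is never set on updates it delivers. A minimal standalone sketch of that bit layout (the constant names are illustrative, not LDK API):

```rust
/// Bit 0 of `channel_flags`: which direction this `channel_update` applies to.
const DIRECTION_BIT: u8 = 1 << 0;
/// Bit 1 of `channel_flags`: the channel is temporarily disabled.
const DISABLE_BIT: u8 = 1 << 1;

fn is_disabled(channel_flags: u8) -> bool {
    channel_flags & DISABLE_BIT != 0
}

fn main() {
    assert!(!is_disabled(DIRECTION_BIT)); // direction set, channel still enabled
    assert!(is_disabled(DISABLE_BIT)); // the case the fuzzer's assertion panics on
}
```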
}, _ => { @@ -1243,13 +1243,13 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } else { for event in nodes[2].get_and_clear_pending_msg_events() { match event { - events::MessageSendEvent::UpdateHTLCs { .. } => {}, - events::MessageSendEvent::SendRevokeAndACK { .. } => {}, - events::MessageSendEvent::SendChannelReestablish { .. } => {}, - events::MessageSendEvent::SendStfu { .. } => {}, - events::MessageSendEvent::SendChannelReady { .. } => {}, - events::MessageSendEvent::SendAnnouncementSignatures { .. } => {}, - events::MessageSendEvent::SendChannelUpdate { ref msg, .. } => { + MessageSendEvent::UpdateHTLCs { .. } => {}, + MessageSendEvent::SendRevokeAndACK { .. } => {}, + MessageSendEvent::SendChannelReestablish { .. } => {}, + MessageSendEvent::SendStfu { .. } => {}, + MessageSendEvent::SendChannelReady { .. } => {}, + MessageSendEvent::SendAnnouncementSignatures { .. } => {}, + MessageSendEvent::SendChannelUpdate { ref msg, .. } => { assert_eq!(msg.contents.channel_flags & 2, 0); // The disable bit must never be set! }, _ => { diff --git a/fuzz/src/onion_message.rs b/fuzz/src/onion_message.rs index c2790d527b0..71836995017 100644 --- a/fuzz/src/onion_message.rs +++ b/fuzz/src/onion_message.rs @@ -10,7 +10,7 @@ use lightning::blinded_path::message::{ }; use lightning::blinded_path::EmptyNodeIdLookUp; use lightning::ln::inbound_payment::ExpandedKey; -use lightning::ln::msgs::{self, OnionMessageHandler}; +use lightning::ln::msgs::{self, BaseMessageHandler, DecodeError, OnionMessageHandler}; use lightning::ln::peer_handler::IgnoringMessageHandler; use lightning::ln::script::ShutdownScript; use lightning::offers::invoice::UnsignedBolt12Invoice; @@ -170,7 +170,7 @@ impl CustomOnionMessageHandler for TestCustomMessageHandler { } fn read_custom_message( &self, _message_type: u64, buffer: &mut R, - ) -> Result, msgs::DecodeError> { + ) -> Result, DecodeError> { let mut buf = Vec::new(); buffer.read_to_limit(&mut buf, u64::MAX)?; return Ok(Some(TestCustomMessage {})); diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 2e8c49ebd28..46d990bb37e 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -1070,15 +1070,13 @@ mod tests { use lightning::chain::channelmonitor::ANTI_REORG_DELAY; use lightning::chain::transaction::OutPoint; use lightning::chain::{chainmonitor, BestBlock, Confirm, Filter}; - use lightning::events::{ - Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, ReplayEvent, - }; + use lightning::events::{Event, PathFailure, ReplayEvent}; use lightning::ln::channelmanager; use lightning::ln::channelmanager::{ ChainParameters, PaymentId, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, }; use lightning::ln::functional_test_utils::*; - use lightning::ln::msgs::{ChannelMessageHandler, Init}; + use lightning::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, Init, MessageSendEvent}; use lightning::ln::peer_handler::{ IgnoringMessageHandler, MessageHandler, PeerManager, SocketDescriptor, }; diff --git a/lightning-dns-resolver/src/lib.rs b/lightning-dns-resolver/src/lib.rs index 48d88cd43f7..6de6b77be90 100644 --- a/lightning-dns-resolver/src/lib.rs +++ b/lightning-dns-resolver/src/lib.rs @@ -164,7 +164,9 @@ mod test { use lightning::events::{Event, PaymentPurpose}; use lightning::ln::channelmanager::{PaymentId, Retry}; use lightning::ln::functional_test_utils::*; - use lightning::ln::msgs::{ChannelMessageHandler, Init, OnionMessageHandler}; + use 
lightning::ln::msgs::{ + BaseMessageHandler, ChannelMessageHandler, Init, OnionMessageHandler, + }; use lightning::ln::peer_handler::IgnoringMessageHandler; use lightning::onion_message::dns_resolution::{HumanReadableName, OMNameResolver}; use lightning::onion_message::messenger::{ diff --git a/lightning-liquidity/tests/common/mod.rs b/lightning-liquidity/tests/common/mod.rs index 8b8507a9f14..f114f7b9c89 100644 --- a/lightning-liquidity/tests/common/mod.rs +++ b/lightning-liquidity/tests/common/mod.rs @@ -15,7 +15,7 @@ use lightning::chain::{chainmonitor, BestBlock, Confirm}; use lightning::ln::channelmanager; use lightning::ln::channelmanager::ChainParameters; use lightning::ln::functional_test_utils::*; -use lightning::ln::msgs::{ChannelMessageHandler, Init}; +use lightning::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, Init}; use lightning::ln::peer_handler::{ IgnoringMessageHandler, MessageHandler, PeerManager, SocketDescriptor, }; diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 6c6d8a716d9..a0daa8235b5 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -623,7 +623,6 @@ mod tests { use bitcoin::constants::ChainHash; use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; use bitcoin::Network; - use lightning::events::*; use lightning::ln::msgs::*; use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler, PeerManager}; use lightning::routing::gossip::NodeId; @@ -684,12 +683,6 @@ mod tests { ) -> Option { None } - fn peer_connected( - &self, _their_node_id: PublicKey, _init_msg: &Init, _inbound: bool, - ) -> Result<(), ()> { - Ok(()) - } - fn peer_disconnected(&self, _their_node_id: PublicKey) {} fn handle_reply_channel_range( &self, _their_node_id: PublicKey, _msg: ReplyChannelRange, ) -> Result<(), LightningError> { @@ -710,12 +703,6 @@ mod tests { ) -> Result<(), LightningError> { Ok(()) } - fn provided_node_features(&self) -> NodeFeatures { - NodeFeatures::empty() - } - fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures { - InitFeatures::empty() - } fn processing_queue_high(&self) -> bool { false } @@ -766,35 +753,39 @@ mod tests { &self, _their_node_id: PublicKey, _msg: PeerStorageRetrieval, ) { } + fn handle_channel_reestablish(&self, _their_node_id: PublicKey, _msg: &ChannelReestablish) { + } + fn handle_error(&self, _their_node_id: PublicKey, _msg: &ErrorMessage) {} + fn get_chain_hashes(&self) -> Option> { + Some(vec![ChainHash::using_genesis_block(Network::Testnet)]) + } + fn message_received(&self) {} + } + impl BaseMessageHandler for MsgHandler { fn peer_disconnected(&self, their_node_id: PublicKey) { if their_node_id == self.expected_pubkey { self.disconnected_flag.store(true, Ordering::SeqCst); - self.pubkey_disconnected.clone().try_send(()).unwrap(); + // This method is called twice as we're two message handlers. `try_send` will fail + // the second time. + let _ = self.pubkey_disconnected.clone().try_send(()); } } fn peer_connected( &self, their_node_id: PublicKey, _init_msg: &Init, _inbound: bool, ) -> Result<(), ()> { if their_node_id == self.expected_pubkey { - self.pubkey_connected.clone().try_send(()).unwrap(); + // This method is called twice as we're two message handlers. `try_send` will fail + // the second time. 
+ let _ = self.pubkey_connected.clone().try_send(()); } Ok(()) } - fn handle_channel_reestablish(&self, _their_node_id: PublicKey, _msg: &ChannelReestablish) { - } - fn handle_error(&self, _their_node_id: PublicKey, _msg: &ErrorMessage) {} fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() } fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures { InitFeatures::empty() } - fn get_chain_hashes(&self) -> Option> { - Some(vec![ChainHash::using_genesis_block(Network::Testnet)]) - } - fn message_received(&self) {} - } - impl MessageSendEventsProvider for MsgHandler { fn get_and_clear_pending_msg_events(&self) -> Vec { let mut ret = Vec::new(); mem::swap(&mut *self.msg_events.lock().unwrap(), &mut ret); diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs index e396b1fcec7..850a0786671 100644 --- a/lightning-persister/src/fs_store.rs +++ b/lightning-persister/src/fs_store.rs @@ -501,8 +501,9 @@ mod tests { use lightning::chain::chainmonitor::Persist; use lightning::chain::ChannelMonitorUpdateStatus; use lightning::check_closed_event; - use lightning::events::{ClosureReason, MessageSendEventsProvider}; + use lightning::events::ClosureReason; use lightning::ln::functional_test_utils::*; + use lightning::ln::msgs::BaseMessageHandler; use lightning::util::persist::read_channel_monitors; use lightning::util::test_utils; diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 5779e7597af..4dbe3e07ce2 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -928,9 +928,9 @@ mod tests { use crate::{get_htlc_update_msgs, get_revoke_commit_msgs}; use crate::chain::{ChannelMonitorUpdateStatus, Watch}; use crate::chain::channelmonitor::ANTI_REORG_DELAY; - use crate::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider}; + use crate::events::{ClosureReason, Event}; use crate::ln::functional_test_utils::*; - use crate::ln::msgs::ChannelMessageHandler; + use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; const CHAINSYNC_MONITOR_PARTITION_FACTOR: u32 = 5; diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 4ff8b7f0d5d..a0f26bfbac0 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -2360,308 +2360,6 @@ impl MaybeReadable for Event { } } -/// An event generated by ChannelManager which indicates a message should be sent to a peer (or -/// broadcast to most peers). -/// These events are handled by PeerManager::process_events if you are using a PeerManager. -#[derive(Clone, Debug)] -#[cfg_attr(any(test, feature = "_test_utils"), derive(PartialEq))] -pub enum MessageSendEvent { - /// Used to indicate that we've accepted a channel open and should send the accept_channel - /// message provided to the given peer. - SendAcceptChannel { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::AcceptChannel, - }, - /// Used to indicate that we've accepted a V2 channel open and should send the accept_channel2 - /// message provided to the given peer. - SendAcceptChannelV2 { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::AcceptChannelV2, - }, - /// Used to indicate that we've initiated a channel open and should send the open_channel - /// message provided to the given peer. 
- SendOpenChannel { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::OpenChannel, - }, - /// Used to indicate that we've initiated a V2 channel open and should send the open_channel2 - /// message provided to the given peer. - SendOpenChannelV2 { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::OpenChannelV2, - }, - /// Used to indicate that a funding_created message should be sent to the peer with the given node_id. - SendFundingCreated { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::FundingCreated, - }, - /// Used to indicate that a funding_signed message should be sent to the peer with the given node_id. - SendFundingSigned { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::FundingSigned, - }, - /// Used to indicate that a stfu message should be sent to the peer with the given node id. - SendStfu { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::Stfu, - }, - /// Used to indicate that a splice_init message should be sent to the peer with the given node id. - SendSpliceInit { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::SpliceInit, - }, - /// Used to indicate that a splice_ack message should be sent to the peer with the given node id. - SendSpliceAck { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::SpliceAck, - }, - /// Used to indicate that a splice_locked message should be sent to the peer with the given node id. - SendSpliceLocked { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::SpliceLocked, - }, - /// Used to indicate that a tx_add_input message should be sent to the peer with the given node_id. - SendTxAddInput { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::TxAddInput, - }, - /// Used to indicate that a tx_add_output message should be sent to the peer with the given node_id. - SendTxAddOutput { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::TxAddOutput, - }, - /// Used to indicate that a tx_remove_input message should be sent to the peer with the given node_id. - SendTxRemoveInput { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::TxRemoveInput, - }, - /// Used to indicate that a tx_remove_output message should be sent to the peer with the given node_id. - SendTxRemoveOutput { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::TxRemoveOutput, - }, - /// Used to indicate that a tx_complete message should be sent to the peer with the given node_id. - SendTxComplete { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. 
- msg: msgs::TxComplete, - }, - /// Used to indicate that a tx_signatures message should be sent to the peer with the given node_id. - SendTxSignatures { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::TxSignatures, - }, - /// Used to indicate that a tx_init_rbf message should be sent to the peer with the given node_id. - SendTxInitRbf { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::TxInitRbf, - }, - /// Used to indicate that a tx_ack_rbf message should be sent to the peer with the given node_id. - SendTxAckRbf { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::TxAckRbf, - }, - /// Used to indicate that a tx_abort message should be sent to the peer with the given node_id. - SendTxAbort { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::TxAbort, - }, - /// Used to indicate that a channel_ready message should be sent to the peer with the given node_id. - SendChannelReady { - /// The node_id of the node which should receive these message(s) - node_id: PublicKey, - /// The channel_ready message which should be sent. - msg: msgs::ChannelReady, - }, - /// Used to indicate that an announcement_signatures message should be sent to the peer with the given node_id. - SendAnnouncementSignatures { - /// The node_id of the node which should receive these message(s) - node_id: PublicKey, - /// The announcement_signatures message which should be sent. - msg: msgs::AnnouncementSignatures, - }, - /// Used to indicate that a series of HTLC update messages, as well as a commitment_signed - /// message should be sent to the peer with the given node_id. - UpdateHTLCs { - /// The node_id of the node which should receive these message(s) - node_id: PublicKey, - /// The update messages which should be sent. ALL messages in the struct should be sent! - updates: msgs::CommitmentUpdate, - }, - /// Used to indicate that a revoke_and_ack message should be sent to the peer with the given node_id. - SendRevokeAndACK { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::RevokeAndACK, - }, - /// Used to indicate that a closing_signed message should be sent to the peer with the given node_id. - SendClosingSigned { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::ClosingSigned, - }, - /// Used to indicate that a shutdown message should be sent to the peer with the given node_id. - SendShutdown { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::Shutdown, - }, - /// Used to indicate that a channel_reestablish message should be sent to the peer with the given node_id. - SendChannelReestablish { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The message which should be sent. - msg: msgs::ChannelReestablish, - }, - /// Used to send a channel_announcement and channel_update to a specific peer, likely on - /// initial connection to ensure our peers know about our channels. 
- SendChannelAnnouncement { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The channel_announcement which should be sent. - msg: msgs::ChannelAnnouncement, - /// The followup channel_update which should be sent. - update_msg: msgs::ChannelUpdate, - }, - /// Used to indicate that a channel_announcement and channel_update should be broadcast to all - /// peers (except the peer with node_id either msg.contents.node_id_1 or msg.contents.node_id_2). - /// - /// Note that after doing so, you very likely (unless you did so very recently) want to - /// broadcast a node_announcement (e.g. via [`PeerManager::broadcast_node_announcement`]). This - /// ensures that any nodes which see our channel_announcement also have a relevant - /// node_announcement, including relevant feature flags which may be important for routing - /// through or to us. - /// - /// [`PeerManager::broadcast_node_announcement`]: crate::ln::peer_handler::PeerManager::broadcast_node_announcement - BroadcastChannelAnnouncement { - /// The channel_announcement which should be sent. - msg: msgs::ChannelAnnouncement, - /// The followup channel_update which should be sent. - update_msg: Option, - }, - /// Used to indicate that a channel_update should be broadcast to all peers. - BroadcastChannelUpdate { - /// The channel_update which should be sent. - msg: msgs::ChannelUpdate, - }, - /// Used to indicate that a node_announcement should be broadcast to all peers. - BroadcastNodeAnnouncement { - /// The node_announcement which should be sent. - msg: msgs::NodeAnnouncement, - }, - /// Used to indicate that a channel_update should be sent to a single peer. - /// In contrast to [`Self::BroadcastChannelUpdate`], this is used when the channel is a - /// private channel and we shouldn't be informing all of our peers of channel parameters. - SendChannelUpdate { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The channel_update which should be sent. - msg: msgs::ChannelUpdate, - }, - /// Broadcast an error downstream to be handled - HandleError { - /// The node_id of the node which should receive this message - node_id: PublicKey, - /// The action which should be taken. - action: msgs::ErrorAction - }, - /// Query a peer for channels with funding transaction UTXOs in a block range. - SendChannelRangeQuery { - /// The node_id of this message recipient - node_id: PublicKey, - /// The query_channel_range which should be sent. - msg: msgs::QueryChannelRange, - }, - /// Request routing gossip messages from a peer for a list of channels identified by - /// their short_channel_ids. - SendShortIdsQuery { - /// The node_id of this message recipient - node_id: PublicKey, - /// The query_short_channel_ids which should be sent. - msg: msgs::QueryShortChannelIds, - }, - /// Sends a reply to a channel range query. This may be one of several SendReplyChannelRange events - /// emitted during processing of the query. - SendReplyChannelRange { - /// The node_id of this message recipient - node_id: PublicKey, - /// The reply_channel_range which should be sent. - msg: msgs::ReplyChannelRange, - }, - /// Sends a timestamp filter for inbound gossip. This should be sent on each new connection to - /// enable receiving gossip messages from the peer. - SendGossipTimestampFilter { - /// The node_id of this message recipient - node_id: PublicKey, - /// The gossip_timestamp_filter which should be sent. 
- msg: msgs::GossipTimestampFilter, - }, - /// Sends a channel partner Peer Storage of our backup which they should store. - /// This should be sent on each new connection to the channel partner or whenever we want - /// them to update the backup that they store. - SendPeerStorage { - /// The node_id of this message recipient - node_id: PublicKey, - /// The peer_storage which should be sent. - msg: msgs::PeerStorage, - }, - /// Sends a channel partner their own peer storage which we store and update when they send - /// a [`msgs::PeerStorage`]. - SendPeerStorageRetrieval { - /// The node_id of this message recipient - node_id: PublicKey, - /// The peer_storage_retrieval which should be sent. - msg: msgs::PeerStorageRetrieval, - } -} - -/// A trait indicating an object may generate message send events -pub trait MessageSendEventsProvider { - /// Gets the list of pending events which were generated by previous actions, clearing the list - /// in the process. - fn get_and_clear_pending_msg_events(&self) -> Vec; -} - /// A trait indicating an object may generate events. /// /// Events are processed by passing an [`EventHandler`] to [`process_pending_events`]. diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index 8f45313c516..c8916978afa 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -11,15 +11,14 @@ use crate::blinded_path::message::{MessageContext, OffersContext}; use crate::blinded_path::payment::PaymentContext; use crate::blinded_path::payment::{AsyncBolt12OfferContext, BlindedPaymentTlvs}; use crate::chain::channelmonitor::{HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; -use crate::events::{ - Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PaymentFailureReason, -}; +use crate::events::{Event, HTLCDestination, PaymentFailureReason}; use crate::ln::blinded_payment_tests::{fail_blinded_htlc_backwards, get_blinded_route_parameters}; use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; use crate::ln::functional_test_utils::*; use crate::ln::msgs; -use crate::ln::msgs::ChannelMessageHandler; -use crate::ln::msgs::OnionMessageHandler; +use crate::ln::msgs::{ + BaseMessageHandler, ChannelMessageHandler, MessageSendEvent, OnionMessageHandler, +}; use crate::ln::offers_tests; use crate::ln::onion_utils::INVALID_ONION_BLINDING; use crate::ln::outbound_payment::PendingOutboundPayment; diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 2ebf75454f4..94dde115337 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -20,11 +20,11 @@ use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS; use crate::chain::ChannelMonitorUpdateStatus; use crate::chain::transaction::OutPoint; use crate::events::bump_transaction::WalletSource; -use crate::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider}; +use crate::events::{ClosureReason, Event}; use crate::ln::chan_utils::ClosingTransaction; use crate::ln::channel_state::{ChannelDetails, ChannelShutdownState}; use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; -use crate::ln::msgs::ChannelMessageHandler; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; use crate::ln::{functional_test_utils::*, msgs}; use crate::sign::ecdsa::EcdsaChannelSigner; use crate::sign::SignerProvider; diff --git a/lightning/src/ln/blinded_payment_tests.rs 
b/lightning/src/ln/blinded_payment_tests.rs index 0e0939508c4..9fc722f8205 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -14,7 +14,7 @@ use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature}; use crate::blinded_path; use crate::blinded_path::payment::{BlindedPaymentPath, Bolt12RefundContext, PaymentForwardNode, ForwardTlvs, PaymentConstraints, PaymentContext, PaymentRelay, UnauthenticatedReceiveTlvs}; -use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PaymentFailureReason}; +use crate::events::{Event, HTLCDestination, PaymentFailureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentHash, PaymentSecret}; use crate::ln::channelmanager; @@ -23,7 +23,7 @@ use crate::types::features::{BlindedHopFeatures, ChannelFeatures, NodeFeatures}; use crate::ln::functional_test_utils::*; use crate::ln::inbound_payment::ExpandedKey; use crate::ln::msgs; -use crate::ln::msgs::{ChannelMessageHandler, UnsignedGossipMessage}; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, UnsignedGossipMessage, MessageSendEvent}; use crate::ln::onion_payment; use crate::ln::onion_utils; use crate::ln::onion_utils::INVALID_ONION_BLINDING; diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 497f724a243..e99cf017b66 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -17,12 +17,12 @@ use bitcoin::hash_types::BlockHash; use bitcoin::network::Network; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor}; use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; -use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination}; +use crate::events::{Event, PaymentPurpose, ClosureReason, HTLCDestination}; use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; use crate::ln::channel::AnnouncementSigsState; use crate::ln::msgs; use crate::ln::types::ChannelId; -use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler}; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, MessageSendEvent}; use crate::util::test_channel_signer::TestChannelSigner; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::test_utils::TestBroadcaster; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 3771e3fc959..d94a10f9286 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -42,7 +42,7 @@ use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock}; use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator}; use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent}; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFunds, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason, ReplayEvent}; +use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFunds, ClosureReason, HTLCDestination, PaymentFailureReason, 
ReplayEvent}; // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. use crate::ln::inbound_payment; @@ -63,7 +63,7 @@ use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htl use crate::ln::msgs; use crate::ln::onion_utils; use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING}; -use crate::ln::msgs::{ChannelMessageHandler, CommitmentUpdate, DecodeError, LightningError}; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, CommitmentUpdate, DecodeError, LightningError, MessageSendEvent}; #[cfg(test)] use crate::ln::outbound_payment; use crate::ln::outbound_payment::{OutboundPayments, PendingOutboundPayment, RetryableInvoiceRequest, SendAlongPathArgs, StaleExpiration}; @@ -1411,8 +1411,8 @@ pub(super) struct PeerState where SP::Target: SignerProvider { /// considered as they are also in [`Self::in_flight_monitor_updates`]). closed_channel_monitor_update_ids: BTreeMap, /// The peer is currently connected (i.e. we've seen a - /// [`ChannelMessageHandler::peer_connected`] and no corresponding - /// [`ChannelMessageHandler::peer_disconnected`]. + /// [`BaseMessageHandler::peer_connected`] and no corresponding + /// [`BaseMessageHandler::peer_disconnected`]. pub is_connected: bool, /// Holds the peer storage data for the channel partner on a per-peer basis. peer_storage: Vec, @@ -1670,7 +1670,7 @@ where /// /// Additionally, it implements the following traits: /// - [`ChannelMessageHandler`] to handle off-chain channel activity from peers -/// - [`MessageSendEventsProvider`] to similarly send such messages to peers +/// - [`BaseMessageHandler`] to handle peer dis/connection and send messages to peers /// - [`OffersMessageHandler`] for BOLT 12 message handling and sending /// - [`EventsProvider`] to generate user-actionable [`Event`]s /// - [`chain::Listen`] and [`chain::Confirm`] for notification of on-chain activity @@ -1771,8 +1771,8 @@ where /// The following is required for [`ChannelManager`] to function properly: /// - Handle messages from peers using its [`ChannelMessageHandler`] implementation (typically /// called by [`PeerManager::read_event`] when processing network I/O) -/// - Send messages to peers obtained via its [`MessageSendEventsProvider`] implementation -/// (typically initiated when [`PeerManager::process_events`] is called) +/// - Process peer connections and send messages to peers obtained via its [`BaseMessageHandler`] +/// implementation (typically initiated when [`PeerManager::process_events`] is called) /// - Feed on-chain activity using either its [`chain::Listen`] or [`chain::Confirm`] implementation /// as documented by those traits /// - Perform any periodic channel and payment checks by calling [`timer_tick_occurred`] roughly @@ -2390,7 +2390,7 @@ where /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest /// [`create_refund_builder`]: Self::create_refund_builder /// [`request_refund_payment`]: Self::request_refund_payment -/// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected +/// [`peer_disconnected`]: msgs::BaseMessageHandler::peer_disconnected /// [`funding_created`]: msgs::FundingCreated /// [`funding_transaction_generated`]: Self::funding_transaction_generated /// [`BlockHash`]: bitcoin::hash_types::BlockHash @@ -3001,7 +3001,7 @@ macro_rules! 
handle_error { $self.finish_close_channel(shutdown_res); if let Some(update) = update_option { let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -3011,7 +3011,7 @@ macro_rules! handle_error { if let msgs::ErrorAction::IgnoreError = err.action { } else { - msg_event = Some(events::MessageSendEvent::HandleError { + msg_event = Some(MessageSendEvent::HandleError { node_id: $counterparty_node_id, action: err.action.clone() }); @@ -3164,7 +3164,7 @@ macro_rules! remove_channel_entry { macro_rules! send_channel_ready { ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{ - $pending_msg_events.push(events::MessageSendEvent::SendChannelReady { + $pending_msg_events.push(MessageSendEvent::SendChannelReady { node_id: $channel.context.get_counterparty_node_id(), msg: $channel_ready_msg, }); @@ -3242,7 +3242,7 @@ macro_rules! handle_monitor_update_completion { // channels, but there's no reason not to just inform our counterparty of our fees // now. if let Ok(msg) = $self.get_channel_update_for_unicast($chan) { - Some(events::MessageSendEvent::SendChannelUpdate { + Some(MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id, msg, }) @@ -3750,7 +3750,7 @@ where } if let Some(msg) = res { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { + peer_state.pending_msg_events.push(MessageSendEvent::SendOpenChannel { node_id: their_network_key, msg, }); @@ -3912,7 +3912,7 @@ where // We can send the `shutdown` message before updating the `ChannelMonitor` // here as we don't need the monitor update to complete until we send a // `shutdown_signed`, which we'll delay if we're pending a monitor update. 
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + peer_state.pending_msg_events.push(MessageSendEvent::SendShutdown { node_id: *counterparty_node_id, msg: shutdown_msg, }); @@ -3978,7 +3978,7 @@ where /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis /// [`ChannelCloseMinimum`]: crate::chain::chaininterface::ConfirmationTarget::ChannelCloseMinimum /// [`NonAnchorChannelFee`]: crate::chain::chaininterface::ConfirmationTarget::NonAnchorChannelFee - /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown + /// [`SendShutdown`]: MessageSendEvent::SendShutdown pub fn close_channel(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) -> Result<(), APIError> { self.close_channel_internal(channel_id, counterparty_node_id, None, None) } @@ -4011,7 +4011,7 @@ where /// /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis /// [`NonAnchorChannelFee`]: crate::chain::chaininterface::ConfirmationTarget::NonAnchorChannelFee - /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown + /// [`SendShutdown`]: MessageSendEvent::SendShutdown pub fn close_channel_with_feerate_and_script(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, shutdown_script: Option) -> Result<(), APIError> { self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script) } @@ -4198,7 +4198,7 @@ where if let Some(update) = update_opt { // If we have some Channel Update to broadcast, we cache it and broadcast it later. let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -4217,7 +4217,7 @@ where if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state = peer_state_mutex.lock().unwrap(); peer_state.pending_msg_events.push( - events::MessageSendEvent::HandleError { + MessageSendEvent::HandleError { node_id: counterparty_node_id, action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id: *channel_id, data: error_message } @@ -4725,7 +4725,7 @@ where /// /// [`Event::PaymentSent`]: events::Event::PaymentSent /// [`Event::PaymentFailed`]: events::Event::PaymentFailed - /// [`UpdateHTLCs`]: events::MessageSendEvent::UpdateHTLCs + /// [`UpdateHTLCs`]: MessageSendEvent::UpdateHTLCs /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress pub fn send_payment( @@ -5173,7 +5173,7 @@ where }; if let Some(msg) = msg_opt { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { + peer_state.pending_msg_events.push(MessageSendEvent::SendFundingCreated { node_id: chan.context.get_counterparty_node_id(), msg, }); @@ -5414,7 +5414,7 @@ where let mut close_res = chan.force_shutdown(false, closure_reason); locked_close_channel!(self, peer_state, chan.context(), chan.funding(), close_res); shutdown_results.push(close_res); - peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + peer_state.pending_msg_events.push(MessageSendEvent::HandleError { node_id: 
counterparty_node_id, action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { @@ -5452,7 +5452,7 @@ where /// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths /// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat /// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta - /// [`BroadcastChannelUpdate`]: events::MessageSendEvent::BroadcastChannelUpdate + /// [`BroadcastChannelUpdate`]: MessageSendEvent::BroadcastChannelUpdate /// [`ChannelUpdate`]: msgs::ChannelUpdate /// [`ChannelUnavailable`]: APIError::ChannelUnavailable /// [`APIMisuseError`]: APIError::APIMisuseError @@ -5489,9 +5489,9 @@ where if let Some(channel) = channel.as_funded() { if let Ok(msg) = self.get_channel_update_for_broadcast(channel) { let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg }); + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { msg }); } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + peer_state.pending_msg_events.push(MessageSendEvent::SendChannelUpdate { node_id: channel.context.get_counterparty_node_id(), msg, }); @@ -5529,7 +5529,7 @@ where /// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths /// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat /// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta - /// [`BroadcastChannelUpdate`]: events::MessageSendEvent::BroadcastChannelUpdate + /// [`BroadcastChannelUpdate`]: MessageSendEvent::BroadcastChannelUpdate /// [`ChannelUpdate`]: msgs::ChannelUpdate /// [`ChannelUnavailable`]: APIError::ChannelUnavailable /// [`APIMisuseError`]: APIError::APIMisuseError @@ -6607,7 +6607,7 @@ where funded_chan.set_channel_update_status(ChannelUpdateStatus::Disabled); if let Ok(update) = self.get_channel_update_for_broadcast(&funded_chan) { let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -6622,7 +6622,7 @@ where funded_chan.set_channel_update_status(ChannelUpdateStatus::Enabled); if let Ok(update) = self.get_channel_update_for_broadcast(&funded_chan) { let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -6690,7 +6690,7 @@ where let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id), None); log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id); peer_state.pending_msg_events.push( - events::MessageSendEvent::HandleError { + MessageSendEvent::HandleError { node_id: counterparty_node_id, action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id: chan_id.clone(), data: "Channel force-closed".to_owned() } @@ -7623,13 +7623,13 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ send_channel_ready!(self, pending_msg_events, channel, msg); } if let Some(msg) = announcement_sigs { - pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { + pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id, msg, }); } if let Some(msg) = tx_signatures { - pending_msg_events.push(events::MessageSendEvent::SendTxSignatures { + pending_msg_events.push(MessageSendEvent::SendTxSignatures { node_id: counterparty_node_id, msg, }); @@ -7637,7 +7637,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ macro_rules! handle_cs { () => { if let Some(update) = commitment_update { - pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + pending_msg_events.push(MessageSendEvent::UpdateHTLCs { node_id: counterparty_node_id, updates: update, }); @@ -7645,7 +7645,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } macro_rules! handle_raa { () => { if let Some(revoke_and_ack) = raa { - pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { + pending_msg_events.push(MessageSendEvent::SendRevokeAndACK { node_id: counterparty_node_id, msg: revoke_and_ack, }); @@ -7839,7 +7839,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ ).map(|mut channel| { let logger = WithChannelContext::from(&self.logger, &channel.context, None); let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| { - events::MessageSendEvent::SendAcceptChannel { + MessageSendEvent::SendAcceptChannel { node_id: *counterparty_node_id, msg, } @@ -7863,7 +7863,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ ) ), *temporary_channel_id) ).map(|channel| { - let message_send_event = events::MessageSendEvent::SendAcceptChannelV2 { + let message_send_event = MessageSendEvent::SendAcceptChannelV2 { node_id: channel.context.get_counterparty_node_id(), msg: channel.accept_inbound_dual_funded_channel() }; @@ -7901,7 +7901,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // This should have been correctly configured by the call to Inbound(V1/V2)Channel::new. debug_assert!(channel.context().minimum_depth().unwrap() == 0); } else if channel.context().get_channel_type().requires_zero_conf() { - let send_msg_err_event = events::MessageSendEvent::HandleError { + let send_msg_err_event = MessageSendEvent::HandleError { node_id: channel.context().get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage{ msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "No zero confirmation channels accepted".to_owned(), } @@ -7917,7 +7917,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // with unfunded channels, so as long as we aren't over the maximum number of unfunded // channels per-peer we can accept channels from a peer with existing ones. if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS { - let send_msg_err_event = events::MessageSendEvent::HandleError { + let send_msg_err_event = MessageSendEvent::HandleError { node_id: channel.context().get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage{ msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), } @@ -8123,7 +8123,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ ).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?; let logger = WithChannelContext::from(&self.logger, &channel.context, None); let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| { - events::MessageSendEvent::SendAcceptChannel { + MessageSendEvent::SendAcceptChannel { node_id: *counterparty_node_id, msg, } @@ -8137,7 +8137,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ &peer_state.latest_features, msg, user_channel_id, &self.default_configuration, best_block_height, &self.logger, ).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?; - let message_send_event = events::MessageSendEvent::SendAcceptChannelV2 { + let message_send_event = MessageSendEvent::SendAcceptChannelV2 { node_id: *counterparty_node_id, msg: channel.accept_inbound_dual_funded_channel(), }; @@ -8268,7 +8268,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // accepted payment from yet. We do, however, need to wait to send our channel_ready // until we have persisted our monitor. if let Some(msg) = funding_msg_opt { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { + peer_state.pending_msg_events.push(MessageSendEvent::SendFundingSigned { node_id: counterparty_node_id.clone(), msg, }); @@ -8491,7 +8491,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push_back((funding_ready_for_sig_event, None)); } - peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + peer_state.pending_msg_events.push(MessageSendEvent::UpdateHTLCs { node_id: counterparty_node_id, updates: CommitmentUpdate { commitment_signed, @@ -8530,7 +8530,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let logger = WithChannelContext::from(&self.logger, &chan.context, None); let tx_signatures_opt = try_channel_entry!(self, peer_state, chan.tx_signatures(msg, &&logger), chan_entry); if let Some(tx_signatures) = tx_signatures_opt { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendTxSignatures { + peer_state.pending_msg_events.push(MessageSendEvent::SendTxSignatures { node_id: *counterparty_node_id, msg: tx_signatures, }); @@ -8606,7 +8606,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // https://github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L560-L561 // For rationale why we echo back `tx_abort`: // https://github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L578-L580 - peer_state.pending_msg_events.push(events::MessageSendEvent::SendTxAbort { + peer_state.pending_msg_events.push(MessageSendEvent::SendTxAbort { node_id: *counterparty_node_id, msg, }); @@ -8638,7 +8638,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_entry); if let Some(announcement_sigs) = announcement_sigs_opt { log_trace!(logger, "Sending announcement_signatures for channel {}", chan.context.channel_id()); - peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { + peer_state.pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id.clone(), msg: announcement_sigs, }); @@ -8650,7 +8650,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // announcement_signatures. log_trace!(logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id()); if let Ok(msg) = self.get_channel_update_for_unicast(chan) { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + peer_state.pending_msg_events.push(MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id.clone(), msg, }); @@ -8705,7 +8705,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // We can send the `shutdown` message before updating the `ChannelMonitor` // here as we don't need the monitor update to complete until we send a // `shutdown_signed`, which we'll delay if we're pending a monitor update. - peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + peer_state.pending_msg_events.push(MessageSendEvent::SendShutdown { node_id: *counterparty_node_id, msg, }); @@ -8757,7 +8757,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let (closing_signed, tx, shutdown_result) = try_channel_entry!(self, peer_state, chan.closing_signed(&self.fee_estimator, &msg, &&logger), chan_entry); debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown()); if let Some(msg) = closing_signed { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + peer_state.pending_msg_events.push(MessageSendEvent::SendClosingSigned { node_id: counterparty_node_id.clone(), msg, }); @@ -8791,7 +8791,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(chan) = chan_option.as_ref().and_then(Channel::as_funded) { if let Ok(update) = self.get_channel_update_for_broadcast(chan) { let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -9289,7 +9289,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError})); } - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { + peer_state.pending_msg_events.push(MessageSendEvent::BroadcastChannelAnnouncement { msg: try_channel_entry!(self, peer_state, chan.announcement_signatures( &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height, msg, &self.default_configuration @@ -9387,7 +9387,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ &self.default_configuration, &*self.best_block.read().unwrap()), chan_entry); let mut channel_update = None; if let Some(msg) = responses.shutdown_msg { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + peer_state.pending_msg_events.push(MessageSendEvent::SendShutdown { node_id: counterparty_node_id.clone(), msg, }); @@ -9396,7 +9396,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // down), send a unicast channel_update to our counterparty to make sure // they have the latest channel parameters. if let Ok(msg) = self.get_channel_update_for_unicast(chan) { - channel_update = Some(events::MessageSendEvent::SendChannelUpdate { + channel_update = Some(MessageSendEvent::SendChannelUpdate { node_id: chan.context.get_counterparty_node_id(), msg, }); @@ -9511,11 +9511,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(funded_chan) = chan.as_funded() { if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) { let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { msg: update }); } - pending_msg_events.push(events::MessageSendEvent::HandleError { + pending_msg_events.push(MessageSendEvent::HandleError { node_id: funded_chan.context.get_counterparty_node_id(), action: msgs::ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { @@ -9618,28 +9618,28 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let node_id = chan.context().get_counterparty_node_id(); if let Some(msgs) = chan.signer_maybe_unblocked(self.chain_hash, &&logger) { if let Some(msg) = msgs.open_channel { - pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { + pending_msg_events.push(MessageSendEvent::SendOpenChannel { node_id, msg, }); } if let Some(msg) = msgs.funding_created { - pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { + pending_msg_events.push(MessageSendEvent::SendFundingCreated { node_id, msg, }); } if let Some(msg) = msgs.accept_channel { - pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { + pending_msg_events.push(MessageSendEvent::SendAcceptChannel { node_id, msg, }); } - let cu_msg = msgs.commitment_update.map(|updates| events::MessageSendEvent::UpdateHTLCs { + let cu_msg = msgs.commitment_update.map(|updates| MessageSendEvent::UpdateHTLCs { node_id, updates, }); - let raa_msg = msgs.revoke_and_ack.map(|msg| events::MessageSendEvent::SendRevokeAndACK { + let raa_msg = msgs.revoke_and_ack.map(|msg| MessageSendEvent::SendRevokeAndACK { node_id, msg, }); @@ -9657,13 +9657,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ (_, _) => {}, } if let Some(msg) = msgs.funding_signed { - pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { + pending_msg_events.push(MessageSendEvent::SendFundingSigned { node_id, msg, }); } if let Some(msg) = msgs.closing_signed { - pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + pending_msg_events.push(MessageSendEvent::SendClosingSigned { node_id, msg, }); @@ -9677,7 +9677,7 @@ This indicates a bug inside LDK. 
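When a channel is force-closed in the hunk above, the queued event is `HandleError` carrying an `ErrorAction` rather than a normal protocol message. A sketch of consumer-side handling for the three actions that appear in this section; `disconnect` is a hypothetical transport hook:

```rust
use bitcoin::secp256k1::PublicKey;
use lightning::ln::msgs::{ErrorAction, MessageSendEvent};

// Only the ErrorActions used in the surrounding hunks are matched here; a
// complete handler must cover every variant.
fn on_handle_error(event: MessageSendEvent, disconnect: impl Fn(PublicKey)) {
	if let MessageSendEvent::HandleError { node_id, action } = event {
		match action {
			ErrorAction::DisconnectPeer { msg } => {
				// Deliver the final error message, if any, then hang up.
				let _ = msg;
				disconnect(node_id);
			},
			ErrorAction::SendWarningMessage { .. } => {
				// Deliver the warning but keep the connection alive.
			},
			ErrorAction::IgnoreError => {},
			_ => {}, // remaining actions elided in this sketch
		}
	}
}
```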
Please report this error at https://github.com/ self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]); if let Ok(update) = self.get_channel_update_for_broadcast(&funded_chan) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + pending_msg_events.push(MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -9750,7 +9750,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ Ok((msg_opt, tx_opt, shutdown_result_opt)) => { if let Some(msg) = msg_opt { has_update = true; - pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + pending_msg_events.push(MessageSendEvent::SendClosingSigned { node_id: funded_chan.context.get_counterparty_node_id(), msg, }); } @@ -9764,7 +9764,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // a closing_signed with a closing transaction to broadcast. if let Ok(update) = self.get_channel_update_for_broadcast(&funded_chan) { let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { msg: update }); } @@ -9813,7 +9813,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ match funded_chan.try_send_stfu(&&logger) { Ok(None) => {}, Ok(Some(stfu)) => { - pending_msg_events.push(events::MessageSendEvent::SendStfu { + pending_msg_events.push(MessageSendEvent::SendStfu { node_id: chan.context().get_counterparty_node_id(), msg: stfu, }); @@ -11056,7 +11056,7 @@ where } } -impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, MR, L> +impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> BaseMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L> where M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>, T::Target: BroadcasterInterface, @@ -11068,6 +11068,219 @@ where MR::Target: MessageRouter, L::Target: Logger, { + fn provided_node_features(&self) -> NodeFeatures { + provided_node_features(&self.default_configuration) + } + + fn provided_init_features(&self, _their_init_features: PublicKey) -> InitFeatures { + provided_init_features(&self.default_configuration) + } + + fn peer_disconnected(&self, counterparty_node_id: PublicKey) { + let _persistence_guard = PersistenceNotifierGuard::optionally_notify( + self, || NotifyOption::SkipPersistHandleEvents); + let mut failed_channels = Vec::new(); + let mut per_peer_state = self.per_peer_state.write().unwrap(); + let remove_peer = { + log_debug!( + WithContext::from(&self.logger, Some(counterparty_node_id), None, None), + "Marking channels with {} disconnected and generating channel_updates.", + log_pubkey!(counterparty_node_id) + ); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + let pending_msg_events = &mut peer_state.pending_msg_events; + peer_state.channel_by_id.retain(|_, chan| { + let logger = WithChannelContext::from(&self.logger, &chan.context(), None); + if chan.peer_disconnected_is_resumable(&&logger) { + return true; + } + // Clean up for removal.
+ let mut close_res = chan.force_shutdown(false, ClosureReason::DisconnectedPeer); + let (funding, context) = chan.funding_and_context_mut(); + locked_close_channel!(self, peer_state, &context, funding, close_res); + failed_channels.push(close_res); + false + }); + // Note that we don't bother generating any events for pre-accept channels - + // they're not considered "channels" yet from the PoV of our events interface. + peer_state.inbound_channel_request_by_id.clear(); + pending_msg_events.retain(|msg| { + match msg { + // V1 Channel Establishment + &MessageSendEvent::SendAcceptChannel { .. } => false, + &MessageSendEvent::SendOpenChannel { .. } => false, + &MessageSendEvent::SendFundingCreated { .. } => false, + &MessageSendEvent::SendFundingSigned { .. } => false, + // V2 Channel Establishment + &MessageSendEvent::SendAcceptChannelV2 { .. } => false, + &MessageSendEvent::SendOpenChannelV2 { .. } => false, + // Common Channel Establishment + &MessageSendEvent::SendChannelReady { .. } => false, + &MessageSendEvent::SendAnnouncementSignatures { .. } => false, + // Quiescence + &MessageSendEvent::SendStfu { .. } => false, + // Splicing + &MessageSendEvent::SendSpliceInit { .. } => false, + &MessageSendEvent::SendSpliceAck { .. } => false, + &MessageSendEvent::SendSpliceLocked { .. } => false, + // Interactive Transaction Construction + &MessageSendEvent::SendTxAddInput { .. } => false, + &MessageSendEvent::SendTxAddOutput { .. } => false, + &MessageSendEvent::SendTxRemoveInput { .. } => false, + &MessageSendEvent::SendTxRemoveOutput { .. } => false, + &MessageSendEvent::SendTxComplete { .. } => false, + &MessageSendEvent::SendTxSignatures { .. } => false, + &MessageSendEvent::SendTxInitRbf { .. } => false, + &MessageSendEvent::SendTxAckRbf { .. } => false, + &MessageSendEvent::SendTxAbort { .. } => false, + // Channel Operations + &MessageSendEvent::UpdateHTLCs { .. } => false, + &MessageSendEvent::SendRevokeAndACK { .. } => false, + &MessageSendEvent::SendClosingSigned { .. } => false, + &MessageSendEvent::SendShutdown { .. } => false, + &MessageSendEvent::SendChannelReestablish { .. } => false, + &MessageSendEvent::HandleError { .. } => false, + // Gossip + &MessageSendEvent::SendChannelAnnouncement { .. } => false, + &MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, + // [`ChannelManager::pending_broadcast_messages`] holds the [`BroadcastChannelUpdate`] + // This check here is to ensure exhaustivity. + &MessageSendEvent::BroadcastChannelUpdate { .. } => { + debug_assert!(false, "This event shouldn't have been here"); + false + }, + &MessageSendEvent::BroadcastNodeAnnouncement { .. } => true, + &MessageSendEvent::SendChannelUpdate { .. } => false, + &MessageSendEvent::SendChannelRangeQuery { .. } => false, + &MessageSendEvent::SendShortIdsQuery { .. } => false, + &MessageSendEvent::SendReplyChannelRange { .. } => false, + &MessageSendEvent::SendGossipTimestampFilter { .. } => false, + + // Peer Storage + &MessageSendEvent::SendPeerStorage { .. } => false, + &MessageSendEvent::SendPeerStorageRetrieval { .. } => false, + } + }); + debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect"); + peer_state.is_connected = false; + peer_state.ok_to_remove(true) + } else { debug_assert!(false, "Unconnected peer disconnected"); true } + }; + if remove_peer { + per_peer_state.remove(&counterparty_node_id); + } + mem::drop(per_peer_state); + + for failure in failed_channels.drain(..)
{ + self.finish_close_channel(failure); + } + } + + fn peer_connected(&self, counterparty_node_id: PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> { + let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None); + if !init_msg.features.supports_static_remote_key() { + log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id)); + return Err(()); + } + + let mut res = Ok(()); + + PersistenceNotifierGuard::optionally_notify(self, || { + // If we have too many peers connected which don't have funded channels, disconnect the + // peer immediately (as long as it doesn't have funded channels). If we have a bunch of + // unfunded channels taking up space in memory for disconnected peers, we still let new + // peers connect, but we'll reject new channels from them. + let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected); + let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS; + + { + let mut peer_state_lock = self.per_peer_state.write().unwrap(); + match peer_state_lock.entry(counterparty_node_id.clone()) { + hash_map::Entry::Vacant(e) => { + if inbound_peer_limited { + res = Err(()); + return NotifyOption::SkipPersistNoEvents; + } + e.insert(Mutex::new(PeerState { + channel_by_id: new_hash_map(), + inbound_channel_request_by_id: new_hash_map(), + latest_features: init_msg.features.clone(), + pending_msg_events: Vec::new(), + in_flight_monitor_updates: BTreeMap::new(), + monitor_update_blocked_actions: BTreeMap::new(), + actions_blocking_raa_monitor_updates: BTreeMap::new(), + closed_channel_monitor_update_ids: BTreeMap::new(), + is_connected: true, + peer_storage: Vec::new(), + })); + }, + hash_map::Entry::Occupied(e) => { + let mut peer_state = e.get().lock().unwrap(); + peer_state.latest_features = init_msg.features.clone(); + + let best_block_height = self.best_block.read().unwrap().height; + if inbound_peer_limited && + Self::unfunded_channel_count(&*peer_state, best_block_height) == + peer_state.channel_by_id.len() + { + res = Err(()); + return NotifyOption::SkipPersistNoEvents; + } + + debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice"); + peer_state.is_connected = true; + }, + } + } + + log_debug!(logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id)); + + let per_peer_state = self.per_peer_state.read().unwrap(); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + let pending_msg_events = &mut peer_state.pending_msg_events; + + if !peer_state.peer_storage.is_empty() { + pending_msg_events.push(MessageSendEvent::SendPeerStorageRetrieval { + node_id: counterparty_node_id.clone(), + msg: msgs::PeerStorageRetrieval { + data: peer_state.peer_storage.clone() + }, + }); + } + + for (_, chan) in peer_state.channel_by_id.iter_mut() { + let logger = WithChannelContext::from(&self.logger, &chan.context(), None); + match chan.peer_connected_get_handshake(self.chain_hash, &&logger) { + ReconnectionMsg::Reestablish(msg) => + pending_msg_events.push(MessageSendEvent::SendChannelReestablish { + node_id: chan.context().get_counterparty_node_id(), + msg, + }), + ReconnectionMsg::Open(OpenChannelMessage::V1(msg)) => + pending_msg_events.push(MessageSendEvent::SendOpenChannel { + node_id: 
chan.context().get_counterparty_node_id(), + msg, + }), + ReconnectionMsg::Open(OpenChannelMessage::V2(msg)) => + pending_msg_events.push(MessageSendEvent::SendOpenChannelV2 { + node_id: chan.context().get_counterparty_node_id(), + msg, + }), + ReconnectionMsg::None => {}, + } + } + } + + return NotifyOption::SkipPersistHandleEvents; + //TODO: Also re-broadcast announcement_signatures + }); + res + } + /// Returns `MessageSendEvent`s strictly ordered per-peer, in the order they were generated. /// The returned array will contain `MessageSendEvent`s for different peers if /// `MessageSendEvent`s to more than one peer exists, but `MessageSendEvent`s to the same peer @@ -11379,7 +11592,7 @@ where if funded_channel.context.is_usable() { log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", funded_channel.context.channel_id()); if let Ok(msg) = self.get_channel_update_for_unicast(funded_channel) { - pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + pending_msg_events.push(MessageSendEvent::SendChannelUpdate { node_id: funded_channel.context.get_counterparty_node_id(), msg, }); @@ -11418,7 +11631,7 @@ where if let Some(announcement) = funded_channel.get_signed_channel_announcement( &self.node_signer, self.chain_hash, height, &self.default_configuration, ) { - pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { + pending_msg_events.push(MessageSendEvent::BroadcastChannelAnnouncement { msg: announcement, // Note that get_signed_channel_announcement fails // if the channel cannot be announced, so @@ -11431,7 +11644,7 @@ where } if let Some(announcement_sigs) = announcement_sigs { log_trace!(logger, "Sending announcement_signatures for channel {}", funded_channel.context.channel_id()); - pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { + pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures { node_id: funded_channel.context.get_counterparty_node_id(), msg: announcement_sigs, }); @@ -11460,11 +11673,11 @@ where failed_channels.push(close_res); if let Ok(update) = self.get_channel_update_for_broadcast(&funded_channel) { let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { + pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { msg: update }); } - pending_msg_events.push(events::MessageSendEvent::HandleError { + pending_msg_events.push(MessageSendEvent::HandleError { node_id: funded_channel.context.get_counterparty_node_id(), action: msgs::ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { @@ -11871,211 +12084,6 @@ where }); } - fn peer_disconnected(&self, counterparty_node_id: PublicKey) { - let _persistence_guard = PersistenceNotifierGuard::optionally_notify( - self, || NotifyOption::SkipPersistHandleEvents); - let mut failed_channels = Vec::new(); - let mut per_peer_state = self.per_peer_state.write().unwrap(); - let remove_peer = { - log_debug!( - WithContext::from(&self.logger, Some(counterparty_node_id), None, None), - "Marking channels with {} disconnected and generating channel_updates.", - log_pubkey!(counterparty_node_id) - ); - if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - let pending_msg_events = &mut peer_state.pending_msg_events; - 
peer_state.channel_by_id.retain(|_, chan| { - let logger = WithChannelContext::from(&self.logger, &chan.context(), None); - if chan.peer_disconnected_is_resumable(&&logger) { - return true; - } - // Clean up for removal. - let mut close_res = chan.force_shutdown(false, ClosureReason::DisconnectedPeer); - let (funding, context) = chan.funding_and_context_mut(); - locked_close_channel!(self, peer_state, &context, funding, close_res); - failed_channels.push(close_res); - false - }); - // Note that we don't bother generating any events for pre-accept channels - - // they're not considered "channels" yet from the PoV of our events interface. - peer_state.inbound_channel_request_by_id.clear(); - pending_msg_events.retain(|msg| { - match msg { - // V1 Channel Establishment - &events::MessageSendEvent::SendAcceptChannel { .. } => false, - &events::MessageSendEvent::SendOpenChannel { .. } => false, - &events::MessageSendEvent::SendFundingCreated { .. } => false, - &events::MessageSendEvent::SendFundingSigned { .. } => false, - // V2 Channel Establishment - &events::MessageSendEvent::SendAcceptChannelV2 { .. } => false, - &events::MessageSendEvent::SendOpenChannelV2 { .. } => false, - // Common Channel Establishment - &events::MessageSendEvent::SendChannelReady { .. } => false, - &events::MessageSendEvent::SendAnnouncementSignatures { .. } => false, - // Quiescence - &events::MessageSendEvent::SendStfu { .. } => false, - // Splicing - &events::MessageSendEvent::SendSpliceInit { .. } => false, - &events::MessageSendEvent::SendSpliceAck { .. } => false, - &events::MessageSendEvent::SendSpliceLocked { .. } => false, - // Interactive Transaction Construction - &events::MessageSendEvent::SendTxAddInput { .. } => false, - &events::MessageSendEvent::SendTxAddOutput { .. } => false, - &events::MessageSendEvent::SendTxRemoveInput { .. } => false, - &events::MessageSendEvent::SendTxRemoveOutput { .. } => false, - &events::MessageSendEvent::SendTxComplete { .. } => false, - &events::MessageSendEvent::SendTxSignatures { .. } => false, - &events::MessageSendEvent::SendTxInitRbf { .. } => false, - &events::MessageSendEvent::SendTxAckRbf { .. } => false, - &events::MessageSendEvent::SendTxAbort { .. } => false, - // Channel Operations - &events::MessageSendEvent::UpdateHTLCs { .. } => false, - &events::MessageSendEvent::SendRevokeAndACK { .. } => false, - &events::MessageSendEvent::SendClosingSigned { .. } => false, - &events::MessageSendEvent::SendShutdown { .. } => false, - &events::MessageSendEvent::SendChannelReestablish { .. } => false, - &events::MessageSendEvent::HandleError { .. } => false, - // Gossip - &events::MessageSendEvent::SendChannelAnnouncement { .. } => false, - &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, - // [`ChannelManager::pending_broadcast_events`] holds the [`BroadcastChannelUpdate`] - // This check here is to ensure exhaustivity. - &events::MessageSendEvent::BroadcastChannelUpdate { .. } => { - debug_assert!(false, "This event shouldn't have been here"); - false - }, - &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true, - &events::MessageSendEvent::SendChannelUpdate { .. } => false, - &events::MessageSendEvent::SendChannelRangeQuery { .. } => false, - &events::MessageSendEvent::SendShortIdsQuery { .. } => false, - &events::MessageSendEvent::SendReplyChannelRange { .. } => false, - &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false, - - // Peer Storage - &events::MessageSendEvent::SendPeerStorage { .. 
} => false, - &events::MessageSendEvent::SendPeerStorageRetrieval { .. } => false, - } - }); - debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect"); - peer_state.is_connected = false; - peer_state.ok_to_remove(true) - } else { debug_assert!(false, "Unconnected peer disconnected"); true } - }; - if remove_peer { - per_peer_state.remove(&counterparty_node_id); - } - mem::drop(per_peer_state); - - for failure in failed_channels.drain(..) { - self.finish_close_channel(failure); - } - } - - fn peer_connected(&self, counterparty_node_id: PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> { - let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None); - if !init_msg.features.supports_static_remote_key() { - log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id)); - return Err(()); - } - - let mut res = Ok(()); - - PersistenceNotifierGuard::optionally_notify(self, || { - // If we have too many peers connected which don't have funded channels, disconnect the - // peer immediately (as long as it doesn't have funded channels). If we have a bunch of - // unfunded channels taking up space in memory for disconnected peers, we still let new - // peers connect, but we'll reject new channels from them. - let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected); - let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS; - - { - let mut peer_state_lock = self.per_peer_state.write().unwrap(); - match peer_state_lock.entry(counterparty_node_id.clone()) { - hash_map::Entry::Vacant(e) => { - if inbound_peer_limited { - res = Err(()); - return NotifyOption::SkipPersistNoEvents; - } - e.insert(Mutex::new(PeerState { - channel_by_id: new_hash_map(), - inbound_channel_request_by_id: new_hash_map(), - latest_features: init_msg.features.clone(), - pending_msg_events: Vec::new(), - in_flight_monitor_updates: BTreeMap::new(), - monitor_update_blocked_actions: BTreeMap::new(), - actions_blocking_raa_monitor_updates: BTreeMap::new(), - closed_channel_monitor_update_ids: BTreeMap::new(), - is_connected: true, - peer_storage: Vec::new(), - })); - }, - hash_map::Entry::Occupied(e) => { - let mut peer_state = e.get().lock().unwrap(); - peer_state.latest_features = init_msg.features.clone(); - - let best_block_height = self.best_block.read().unwrap().height; - if inbound_peer_limited && - Self::unfunded_channel_count(&*peer_state, best_block_height) == - peer_state.channel_by_id.len() - { - res = Err(()); - return NotifyOption::SkipPersistNoEvents; - } - - debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice"); - peer_state.is_connected = true; - }, - } - } - - log_debug!(logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id)); - - let per_peer_state = self.per_peer_state.read().unwrap(); - if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - let pending_msg_events = &mut peer_state.pending_msg_events; - - if !peer_state.peer_storage.is_empty() { - pending_msg_events.push(events::MessageSendEvent::SendPeerStorageRetrieval { - node_id: counterparty_node_id.clone(), - msg: msgs::PeerStorageRetrieval { - data: peer_state.peer_storage.clone() - }, - }); - } - - for (_, chan) in peer_state.channel_by_id.iter_mut() { - let 
logger = WithChannelContext::from(&self.logger, &chan.context(), None); - match chan.peer_connected_get_handshake(self.chain_hash, &&logger) { - ReconnectionMsg::Reestablish(msg) => - pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish { - node_id: chan.context().get_counterparty_node_id(), - msg, - }), - ReconnectionMsg::Open(OpenChannelMessage::V1(msg)) => - pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { - node_id: chan.context().get_counterparty_node_id(), - msg, - }), - ReconnectionMsg::Open(OpenChannelMessage::V2(msg)) => - pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 { - node_id: chan.context().get_counterparty_node_id(), - msg, - }), - ReconnectionMsg::None => {}, - } - } - } - - return NotifyOption::SkipPersistHandleEvents; - //TODO: Also re-broadcast announcement_signatures - }); - res - } - fn handle_error(&self, counterparty_node_id: PublicKey, msg: &msgs::ErrorMessage) { match &msg.data as &str { "cannot co-op close channel w/ active htlcs"| @@ -12101,12 +12109,12 @@ where .and_then(Channel::as_funded) { if let Some(msg) = chan.get_outbound_shutdown() { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + peer_state.pending_msg_events.push(MessageSendEvent::SendShutdown { node_id: counterparty_node_id, msg, }); } - peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + peer_state.pending_msg_events.push(MessageSendEvent::HandleError { node_id: counterparty_node_id, action: msgs::ErrorAction::SendWarningMessage { msg: msgs::WarningMessage { @@ -12160,14 +12168,14 @@ where self.chain_hash, &self.fee_estimator, &self.logger, ) { Ok(Some(OpenChannelMessage::V1(msg))) => { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel { + peer_state.pending_msg_events.push(MessageSendEvent::SendOpenChannel { node_id: counterparty_node_id, msg, }); return; }, Ok(Some(OpenChannelMessage::V2(msg))) => { - peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 { + peer_state.pending_msg_events.push(MessageSendEvent::SendOpenChannelV2 { node_id: counterparty_node_id, msg, }); @@ -12184,14 +12192,6 @@ where } } - fn provided_node_features(&self) -> NodeFeatures { - provided_node_features(&self.default_configuration) - } - - fn provided_init_features(&self, _their_init_features: PublicKey) -> InitFeatures { - provided_init_features(&self.default_configuration) - } - fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> { Some(vec![self.chain_hash]) } @@ -14836,13 +14836,12 @@ mod tests { use bitcoin::hashes::Hash; use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; use core::sync::atomic::Ordering; - use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; + use crate::events::{Event, HTLCDestination, ClosureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret}; use crate::ln::channelmanager::{create_recv_pending_htlc_info, inbound_payment, ChannelConfigOverrides, HTLCForwardInfo, InterceptId, PaymentId, RecipientOnionFields}; use crate::ln::functional_test_utils::*; - use crate::ln::msgs::{self, AcceptChannel, ErrorAction}; - use crate::ln::msgs::ChannelMessageHandler; + use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, AcceptChannel, ErrorAction, MessageSendEvent}; use crate::ln::outbound_payment::Retry; use crate::prelude::*; use crate::routing::router::{PaymentParameters, RouteParameters, find_route}; @@ -16320,10
+16319,10 @@ pub mod bench { use crate::chain::Listen; use crate::chain::chainmonitor::{ChainMonitor, Persist}; use crate::sign::{KeysManager, InMemorySigner}; - use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider}; + use crate::events::Event; use crate::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId, RecipientOnionFields, Retry}; use crate::ln::functional_test_utils::*; - use crate::ln::msgs::{ChannelMessageHandler, Init}; + use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, Init, MessageSendEvent}; use crate::routing::gossip::NetworkGraph; use crate::routing::router::{PaymentParameters, RouteParameters}; use crate::util::test_utils; @@ -16336,7 +16335,7 @@ pub mod bench { use bitcoin::{Transaction, TxOut}; use bitcoin::transaction::Version; - use crate::sync::{Arc, Mutex, RwLock}; + use crate::sync::{Arc, RwLock}; use criterion::Criterion; diff --git a/lightning/src/ln/dual_funding_tests.rs b/lightning/src/ln/dual_funding_tests.rs index 72fa049d6b0..056544b3d16 100644 --- a/lightning/src/ln/dual_funding_tests.rs +++ b/lightning/src/ln/dual_funding_tests.rs @@ -11,7 +11,7 @@ use { crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator}, - crate::events::{Event, MessageSendEvent, MessageSendEventsProvider}, + crate::events::Event, crate::ln::chan_utils::{ make_funding_redeemscript, ChannelPublicKeys, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, @@ -19,7 +19,7 @@ use { crate::ln::channel::PendingV2Channel, crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint}, crate::ln::functional_test_utils::*, - crate::ln::msgs::ChannelMessageHandler, + crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}, crate::ln::msgs::{CommitmentSigned, TxAddInput, TxAddOutput, TxComplete, TxSignatures}, crate::ln::types::ChannelId, crate::prelude::*, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 67470b0ce45..2e5b57cf4b7 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -13,14 +13,14 @@ use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch, chainmonitor::Persist}; use crate::chain::channelmonitor::ChannelMonitor; use crate::chain::transaction::OutPoint; -use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, PaymentFailureReason}; +use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, PathFailure, PaymentPurpose, PaymentFailureReason}; use crate::events::bump_transaction::{BumpTransactionEvent, BumpTransactionEventHandler, Wallet, WalletSource}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret}; use crate::ln::channelmanager::{AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA}; use crate::types::features::InitFeatures; use crate::ln::msgs; -use crate::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler}; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent, RoutingMessageHandler}; use crate::ln::outbound_payment::Retry; use crate::ln::peer_handler::IgnoringMessageHandler; use crate::onion_message::messenger::OnionMessenger; diff --git 
a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 91c67c7e426..3526181c72e 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -19,7 +19,7 @@ use crate::chain::channelmonitor::{Balance, ChannelMonitorUpdateStep, CLTV_CLAIM use crate::chain::transaction::OutPoint; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider}; use crate::events::bump_transaction::WalletSource; -use crate::events::{Event, FundingInfo, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; +use crate::events::{Event, FundingInfo, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentSecret, PaymentHash}; use crate::ln::channel::{get_holder_selected_channel_reserve_satoshis, Channel, InboundV1Channel, OutboundV1Channel, COINBASE_MATURITY, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT}; @@ -30,8 +30,8 @@ use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_ use crate::routing::gossip::{NetworkGraph, NetworkUpdate}; use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route, RouteParameters}; use crate::types::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures}; -use crate::ln::msgs::{self, AcceptChannel}; -use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction}; +use crate::ln::msgs; +use crate::ln::msgs::{AcceptChannel, BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, ErrorAction, MessageSendEvent}; use crate::util::test_channel_signer::TestChannelSigner; use crate::util::test_utils::{self, TestLogger, WatchtowerPersister}; use crate::util::errors::APIError; diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs index 2ed66e83d43..9fbcaef92d2 100644 --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@ -21,10 +21,9 @@ use bitcoin::{OutPoint, ScriptBuf, Sequence, Transaction, TxIn, TxOut, Txid, Wei use crate::chain::chaininterface::fee_for_weight; use crate::events::bump_transaction::{BASE_INPUT_WEIGHT, EMPTY_SCRIPT_SIG_WEIGHT}; -use crate::events::MessageSendEvent; use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS; use crate::ln::msgs; -use crate::ln::msgs::{SerialId, TxSignatures}; +use crate::ln::msgs::{MessageSendEvent, SerialId, TxSignatures}; use crate::ln::types::ChannelId; use crate::sign::{EntropySource, P2TR_KEY_PATH_WITNESS_WEIGHT, P2WPKH_WITNESS_WEIGHT}; use crate::util::ser::TransactionU16LenLimited; diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index da3d370d70e..3c67b7c581f 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -711,11 +711,10 @@ mod test { use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::network::Network; use crate::sign::PhantomKeysManager; - use crate::events::{MessageSendEvent, MessageSendEventsProvider}; use crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::ln::channelmanager::{Bolt11InvoiceParameters, PhantomRouteHints, MIN_FINAL_CLTV_EXPIRY_DELTA, PaymentId, RecipientOnionFields, Retry}; use crate::ln::functional_test_utils::*; - use crate::ln::msgs::ChannelMessageHandler; + use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; use 
crate::routing::router::{PaymentParameters, RouteParameters}; use crate::util::test_utils; use crate::util::config::UserConfig; diff --git a/lightning/src/ln/max_payment_path_len_tests.rs b/lightning/src/ln/max_payment_path_len_tests.rs index fb1004062a0..3c424c9a393 100644 --- a/lightning/src/ln/max_payment_path_len_tests.rs +++ b/lightning/src/ln/max_payment_path_len_tests.rs @@ -13,14 +13,14 @@ use bitcoin::secp256k1::{Secp256k1, PublicKey}; use crate::blinded_path::BlindedHop; use crate::blinded_path::payment::{BlindedPayInfo, BlindedPaymentPath, Bolt12RefundContext, PaymentConstraints, PaymentContext, UnauthenticatedReceiveTlvs}; -use crate::events::{Event, MessageSendEventsProvider}; +use crate::events::Event; use crate::types::payment::PaymentSecret; use crate::ln::blinded_payment_tests::get_blinded_route_parameters; use crate::ln::channelmanager::PaymentId; use crate::types::features::BlindedHopFeatures; use crate::ln::functional_test_utils::*; use crate::ln::msgs; -use crate::ln::msgs::OnionMessageHandler; +use crate::ln::msgs::{BaseMessageHandler, OnionMessageHandler}; use crate::ln::onion_utils; use crate::ln::onion_utils::MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY; use crate::ln::outbound_payment::{RecipientOnionFields, Retry, RetryableSendFailure}; diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index eed5226902f..0a42d0a8b99 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -14,12 +14,12 @@ use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS,LATEN use crate::chain::transaction::OutPoint; use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight}; use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource}; -use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; +use crate::events::{Event, ClosureReason, HTLCDestination}; use crate::ln::channel; use crate::ln::types::ChannelId; use crate::ln::chan_utils; use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId, RecipientOnionFields}; -use crate::ln::msgs::ChannelMessageHandler; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; use crate::crypto::utils::sign; use crate::util::ser::Writeable; use crate::util::scid_utils::block_from_scid; diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 0a53d3649e4..81505a92cc7 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -55,7 +55,6 @@ use core::fmt::Display; use crate::io::{self, Cursor, Read}; use crate::io_extras::read_to_end; -use crate::events::MessageSendEventsProvider; use crate::crypto::streams::ChaChaPolyReadAdapter; use crate::util::logger; use crate::util::ser::{BigSize, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname, LengthRead, LengthReadable, LengthReadableArgs, Readable, ReadableArgs, TransactionU16LenLimited, WithoutLength, Writeable, Writer}; @@ -1517,11 +1516,345 @@ pub struct CommitmentUpdate { pub commitment_signed: CommitmentSigned, } +/// An event generated by a [`BaseMessageHandler`] which indicates a message should be sent to a +/// peer (or broadcast to most peers). +/// +/// These events are handled by [`PeerManager::process_events`] if you are using a [`PeerManager`]. 
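Before the variant list below, it may help to see the shape consumers work with. The enum splits into directed events, which carry a recipient `node_id`, and `Broadcast*` events, which fan out to (nearly) all peers. A rough classification sketch; only a handful of the variants defined below are spelled out:

```rust
use bitcoin::secp256k1::PublicKey;
use lightning::ln::msgs::MessageSendEvent;

// Directed events name their recipient; Broadcast* events return None here
// and should be fanned out. A real router must cover every variant (all the
// elided directed variants also carry a node_id).
fn direct_recipient(event: &MessageSendEvent) -> Option<PublicKey> {
	match event {
		MessageSendEvent::SendOpenChannel { node_id, .. }
		| MessageSendEvent::SendAcceptChannel { node_id, .. }
		| MessageSendEvent::SendChannelReady { node_id, .. }
		| MessageSendEvent::UpdateHTLCs { node_id, .. }
		| MessageSendEvent::SendRevokeAndACK { node_id, .. } => Some(*node_id),
		MessageSendEvent::BroadcastChannelAnnouncement { .. }
		| MessageSendEvent::BroadcastChannelUpdate { .. }
		| MessageSendEvent::BroadcastNodeAnnouncement { .. } => None,
		_ => None,
	}
}
```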
+/// +/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events +/// [`PeerManager`]: crate::ln::peer_handler::PeerManager +#[derive(Clone, Debug)] +#[cfg_attr(any(test, feature = "_test_utils"), derive(PartialEq))] +pub enum MessageSendEvent { + /// Used to indicate that we've accepted a channel open and should send the accept_channel + /// message provided to the given peer. + SendAcceptChannel { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: AcceptChannel, + }, + /// Used to indicate that we've accepted a V2 channel open and should send the accept_channel2 + /// message provided to the given peer. + SendAcceptChannelV2 { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: AcceptChannelV2, + }, + /// Used to indicate that we've initiated a channel open and should send the open_channel + /// message provided to the given peer. + SendOpenChannel { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: OpenChannel, + }, + /// Used to indicate that we've initiated a V2 channel open and should send the open_channel2 + /// message provided to the given peer. + SendOpenChannelV2 { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: OpenChannelV2, + }, + /// Used to indicate that a funding_created message should be sent to the peer with the given node_id. + SendFundingCreated { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: FundingCreated, + }, + /// Used to indicate that a funding_signed message should be sent to the peer with the given node_id. + SendFundingSigned { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: FundingSigned, + }, + /// Used to indicate that a stfu message should be sent to the peer with the given node id. + SendStfu { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: Stfu, + }, + /// Used to indicate that a splice_init message should be sent to the peer with the given node id. + SendSpliceInit { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: SpliceInit, + }, + /// Used to indicate that a splice_ack message should be sent to the peer with the given node id. + SendSpliceAck { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: SpliceAck, + }, + /// Used to indicate that a splice_locked message should be sent to the peer with the given node id. + SendSpliceLocked { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: SpliceLocked, + }, + /// Used to indicate that a tx_add_input message should be sent to the peer with the given node_id. + SendTxAddInput { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: TxAddInput, + }, + /// Used to indicate that a tx_add_output message should be sent to the peer with the given node_id. 
+ SendTxAddOutput { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: TxAddOutput, + }, + /// Used to indicate that a tx_remove_input message should be sent to the peer with the given node_id. + SendTxRemoveInput { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: TxRemoveInput, + }, + /// Used to indicate that a tx_remove_output message should be sent to the peer with the given node_id. + SendTxRemoveOutput { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: TxRemoveOutput, + }, + /// Used to indicate that a tx_complete message should be sent to the peer with the given node_id. + SendTxComplete { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: TxComplete, + }, + /// Used to indicate that a tx_signatures message should be sent to the peer with the given node_id. + SendTxSignatures { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: TxSignatures, + }, + /// Used to indicate that a tx_init_rbf message should be sent to the peer with the given node_id. + SendTxInitRbf { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: TxInitRbf, + }, + /// Used to indicate that a tx_ack_rbf message should be sent to the peer with the given node_id. + SendTxAckRbf { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: TxAckRbf, + }, + /// Used to indicate that a tx_abort message should be sent to the peer with the given node_id. + SendTxAbort { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: TxAbort, + }, + /// Used to indicate that a channel_ready message should be sent to the peer with the given node_id. + SendChannelReady { + /// The node_id of the node which should receive these message(s) + node_id: PublicKey, + /// The channel_ready message which should be sent. + msg: ChannelReady, + }, + /// Used to indicate that an announcement_signatures message should be sent to the peer with the given node_id. + SendAnnouncementSignatures { + /// The node_id of the node which should receive these message(s) + node_id: PublicKey, + /// The announcement_signatures message which should be sent. + msg: AnnouncementSignatures, + }, + /// Used to indicate that a series of HTLC update messages, as well as a commitment_signed + /// message should be sent to the peer with the given node_id. + UpdateHTLCs { + /// The node_id of the node which should receive these message(s) + node_id: PublicKey, + /// The update messages which should be sent. ALL messages in the struct should be sent! + updates: CommitmentUpdate, + }, + /// Used to indicate that a revoke_and_ack message should be sent to the peer with the given node_id. + SendRevokeAndACK { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: RevokeAndACK, + }, + /// Used to indicate that a closing_signed message should be sent to the peer with the given node_id. 
+ SendClosingSigned { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: ClosingSigned, + }, + /// Used to indicate that a shutdown message should be sent to the peer with the given node_id. + SendShutdown { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: Shutdown, + }, + /// Used to indicate that a channel_reestablish message should be sent to the peer with the given node_id. + SendChannelReestablish { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The message which should be sent. + msg: ChannelReestablish, + }, + /// Used to send a channel_announcement and channel_update to a specific peer, likely on + /// initial connection to ensure our peers know about our channels. + SendChannelAnnouncement { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The channel_announcement which should be sent. + msg: ChannelAnnouncement, + /// The followup channel_update which should be sent. + update_msg: ChannelUpdate, + }, + /// Used to indicate that a channel_announcement and channel_update should be broadcast to all + /// peers (except the peer with node_id either msg.contents.node_id_1 or msg.contents.node_id_2). + /// + /// Note that after doing so, you very likely (unless you did so very recently) want to + /// broadcast a node_announcement (e.g. via [`PeerManager::broadcast_node_announcement`]). This + /// ensures that any nodes which see our channel_announcement also have a relevant + /// node_announcement, including relevant feature flags which may be important for routing + /// through or to us. + /// + /// [`PeerManager::broadcast_node_announcement`]: crate::ln::peer_handler::PeerManager::broadcast_node_announcement + BroadcastChannelAnnouncement { + /// The channel_announcement which should be sent. + msg: ChannelAnnouncement, + /// The followup channel_update which should be sent. + update_msg: Option<ChannelUpdate>, + }, + /// Used to indicate that a channel_update should be broadcast to all peers. + BroadcastChannelUpdate { + /// The channel_update which should be sent. + msg: ChannelUpdate, + }, + /// Used to indicate that a node_announcement should be broadcast to all peers. + BroadcastNodeAnnouncement { + /// The node_announcement which should be sent. + msg: NodeAnnouncement, + }, + /// Used to indicate that a channel_update should be sent to a single peer. + /// In contrast to [`Self::BroadcastChannelUpdate`], this is used when the channel is a + /// private channel and we shouldn't be informing all of our peers of channel parameters. + SendChannelUpdate { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The channel_update which should be sent. + msg: ChannelUpdate, + }, + /// Broadcast an error downstream to be handled + HandleError { + /// The node_id of the node which should receive this message + node_id: PublicKey, + /// The action which should be taken. + action: ErrorAction + }, + /// Query a peer for channels with funding transaction UTXOs in a block range. + SendChannelRangeQuery { + /// The node_id of this message recipient + node_id: PublicKey, + /// The query_channel_range which should be sent. + msg: QueryChannelRange, + }, + /// Request routing gossip messages from a peer for a list of channels identified by + /// their short_channel_ids.
+ SendShortIdsQuery { + /// The node_id of this message recipient + node_id: PublicKey, + /// The query_short_channel_ids which should be sent. + msg: QueryShortChannelIds, + }, + /// Sends a reply to a channel range query. This may be one of several SendReplyChannelRange events + /// emitted during processing of the query. + SendReplyChannelRange { + /// The node_id of this message recipient + node_id: PublicKey, + /// The reply_channel_range which should be sent. + msg: ReplyChannelRange, + }, + /// Sends a timestamp filter for inbound gossip. This should be sent on each new connection to + /// enable receiving gossip messages from the peer. + SendGossipTimestampFilter { + /// The node_id of this message recipient + node_id: PublicKey, + /// The gossip_timestamp_filter which should be sent. + msg: GossipTimestampFilter, + }, + /// Sends a channel partner Peer Storage of our backup which they should store. + /// This should be sent on each new connection to the channel partner or whenever we want + /// them to update the backup that they store. + SendPeerStorage { + /// The node_id of this message recipient + node_id: PublicKey, + /// The peer_storage which should be sent. + msg: PeerStorage, + }, + /// Sends a channel partner their own peer storage which we store and update when they send + /// a [`PeerStorage`]. + SendPeerStorageRetrieval { + /// The node_id of this message recipient + node_id: PublicKey, + /// The peer_storage_retrieval which should be sent. + msg: PeerStorageRetrieval, + } +} + +/// A trait to describe an object which handles when peers connect + disconnect and generates +/// outbound messages. +/// +/// It acts as a supertrait for all the P2P message handlers and can contribute to the +/// [`InitFeatures`] which we send to peers, or decide to refuse a connection to a peer. +pub trait BaseMessageHandler { + /// Gets the list of pending events which were generated by previous actions, clearing the list + /// in the process. + fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent>; + + /// Indicates a connection to the peer failed/an existing connection was lost. + fn peer_disconnected(&self, their_node_id: PublicKey); + + /// Gets the node feature flags which this handler itself supports. All available handlers are + /// queried similarly and their feature flags are OR'd together to form the [`NodeFeatures`] + /// which are broadcasted in our [`NodeAnnouncement`] message. + fn provided_node_features(&self) -> NodeFeatures; + + /// Gets the init feature flags which should be sent to the given peer. All available handlers + /// are queried similarly and their feature flags are OR'd together to form the [`InitFeatures`] + /// which are sent in our [`Init`] message. + /// + /// Note that this method is called before [`Self::peer_connected`]. + fn provided_init_features(&self, their_node_id: PublicKey) -> InitFeatures; + + /// Handle a peer (re)connecting. + /// + /// May return an `Err(())` if the features the peer supports are not sufficient to communicate + /// with us. Implementors should be somewhat conservative about doing so, however, as other + /// message handlers may still wish to communicate with this peer. + /// + /// [`Self::peer_disconnected`] will not be called if `Err(())` is returned. + fn peer_connected(&self, their_node_id: PublicKey, msg: &Init, inbound: bool) -> Result<(), ()>; +} + /// A trait to describe an object which can receive channel messages.
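With the enum and the new supertrait both in place above, the minimum an implementor owes `BaseMessageHandler` is small. A no-op sketch in the spirit of `IgnoringMessageHandler`: it queues nothing, advertises no features, and accepts any peer:

```rust
use bitcoin::secp256k1::PublicKey;
use lightning::ln::msgs::{BaseMessageHandler, Init, MessageSendEvent};
use lightning::types::features::{InitFeatures, NodeFeatures};

struct NoopHandler;

impl BaseMessageHandler for NoopHandler {
	// Nothing is ever queued, so there is never anything to drain.
	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> { Vec::new() }
	fn peer_disconnected(&self, _their_node_id: PublicKey) {}
	// Contribute no bits to the OR'd NodeFeatures/InitFeatures.
	fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() }
	fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures {
		InitFeatures::empty()
	}
	// Accept every peer; returning Err(()) here would reject the connection.
	fn peer_connected(&self, _their_node_id: PublicKey, _msg: &Init, _inbound: bool)
		-> Result<(), ()> { Ok(()) }
}
```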
/// /// Messages MAY be called in parallel when they originate from different `their_node_ids`, however /// they MUST NOT be called in parallel when the two calls have the same `their_node_id`. -pub trait ChannelMessageHandler : MessageSendEventsProvider { +pub trait ChannelMessageHandler : BaseMessageHandler { // Channel init: /// Handle an incoming `open_channel` message from the given peer. fn handle_open_channel(&self, their_node_id: PublicKey, msg: &OpenChannel); @@ -1606,18 +1939,7 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider { /// Handle an incoming `announcement_signatures` message from the given peer. fn handle_announcement_signatures(&self, their_node_id: PublicKey, msg: &AnnouncementSignatures); - // Connection loss/reestablish: - /// Indicates a connection to the peer failed/an existing connection was lost. - fn peer_disconnected(&self, their_node_id: PublicKey); - - /// Handle a peer reconnecting, possibly generating `channel_reestablish` message(s). - /// - /// May return an `Err(())` if the features the peer supports are not sufficient to communicate - /// with us. Implementors should be somewhat conservative about doing so, however, as other - /// message handlers may still wish to communicate with this peer. - /// - /// [`Self::peer_disconnected`] will not be called if `Err(())` is returned. - fn peer_connected(&self, their_node_id: PublicKey, msg: &Init, inbound: bool) -> Result<(), ()>; + // Channel reestablish: /// Handle an incoming `channel_reestablish` message from the given peer. fn handle_channel_reestablish(&self, their_node_id: PublicKey, msg: &ChannelReestablish); @@ -1629,18 +1951,6 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider { fn handle_error(&self, their_node_id: PublicKey, msg: &ErrorMessage); // Handler information: - /// Gets the node feature flags which this handler itself supports. All available handlers are - /// queried similarly and their feature flags are OR'd together to form the [`NodeFeatures`] - /// which are broadcasted in our [`NodeAnnouncement`] message. - fn provided_node_features(&self) -> NodeFeatures; - - /// Gets the init feature flags which should be sent to the given peer. All available handlers - /// are queried similarly and their feature flags are OR'd together to form the [`InitFeatures`] - /// which are sent in our [`Init`] message. - /// - /// Note that this method is called before [`Self::peer_connected`]. - fn provided_init_features(&self, their_node_id: PublicKey) -> InitFeatures; - /// Gets the chain hashes for this `ChannelMessageHandler` indicating which chains it supports. /// /// If it's `None`, then no particular network chain hash compatibility will be enforced when @@ -1663,7 +1973,7 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider { /// For messages enabled with the `gossip_queries` feature there are potential DoS vectors when /// handling inbound queries. Implementors using an on-disk network graph should be aware of /// repeated disk I/O for queries accessing different parts of the network graph. -pub trait RoutingMessageHandler : MessageSendEventsProvider { +pub trait RoutingMessageHandler : BaseMessageHandler { /// Handle an incoming `node_announcement` message, returning `true` if it should be forwarded on, /// `false` or returning an `Err` otherwise. /// @@ -1688,18 +1998,6 @@ pub trait RoutingMessageHandler : MessageSendEventsProvider { /// higher (as defined by `<PublicKey as Ord>::cmp`) than `starting_point`.
/// If `None` is provided for `starting_point`, we start at the first node. fn get_next_node_announcement(&self, starting_point: Option<&NodeId>) -> Option<NodeAnnouncement>; - /// Called when a connection is established with a peer. This can be used to - /// perform routing table synchronization using a strategy defined by the - /// implementor. - /// - /// May return an `Err(())` if the features the peer supports are not sufficient to communicate - /// with us. Implementors should be somewhat conservative about doing so, however, as other - /// message handlers may still wish to communicate with this peer. - /// - /// [`Self::peer_disconnected`] will not be called if `Err(())` is returned. - fn peer_connected(&self, their_node_id: PublicKey, init: &Init, inbound: bool) -> Result<(), ()>; - /// Indicates a connection to the peer failed/an existing connection was lost. - fn peer_disconnected(&self, their_node_id: PublicKey); /// Handles the reply of a query we initiated to learn about channels /// for a given range of blocks. We can expect to receive one or more /// replies to a single query. @@ -1722,58 +2020,22 @@ pub trait RoutingMessageHandler : MessageSendEventsProvider { /// caller should seek to reduce the rate of new gossip messages handled, especially /// [`ChannelAnnouncement`]s. fn processing_queue_high(&self) -> bool; - - // Handler information: - /// Gets the node feature flags which this handler itself supports. All available handlers are - /// queried similarly and their feature flags are OR'd together to form the [`NodeFeatures`] - /// which are broadcasted in our [`NodeAnnouncement`] message. - fn provided_node_features(&self) -> NodeFeatures; - /// Gets the init feature flags which should be sent to the given peer. All available handlers - /// are queried similarly and their feature flags are OR'd together to form the [`InitFeatures`] - /// which are sent in our [`Init`] message. - /// - /// Note that this method is called before [`Self::peer_connected`]. - fn provided_init_features(&self, their_node_id: PublicKey) -> InitFeatures; } /// A handler for received [`OnionMessage`]s and for providing generated ones to send. -pub trait OnionMessageHandler { +pub trait OnionMessageHandler: BaseMessageHandler { /// Handle an incoming `onion_message` message from the given peer. fn handle_onion_message(&self, peer_node_id: PublicKey, msg: &OnionMessage); /// Returns the next pending onion message for the peer with the given node id. - fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option<OnionMessage>; - - /// Called when a connection is established with a peer. Can be used to track which peers - /// advertise onion message support and are online. - /// - /// May return an `Err(())` if the features the peer supports are not sufficient to communicate - /// with us. Implementors should be somewhat conservative about doing so, however, as other - /// message handlers may still wish to communicate with this peer. /// - /// [`Self::peer_disconnected`] will not be called if `Err(())` is returned. - fn peer_connected(&self, their_node_id: PublicKey, init: &Init, inbound: bool) -> Result<(), ()>; - - /// Indicates a connection to the peer failed/an existing connection was lost. Allows handlers to - /// drop and refuse to forward onion messages to this peer. - fn peer_disconnected(&self, their_node_id: PublicKey); + /// Note that onion messages can only be provided upstream via this method and *not* via + /// [`BaseMessageHandler::get_and_clear_pending_msg_events`].
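A sketch of the pull-based flow that note describes: unlike channel and gossip traffic, onion messages are fetched per peer until the handler runs dry, rather than drained from the shared event queue. `deliver` is a hypothetical transport hook:

```rust
use bitcoin::secp256k1::PublicKey;
use lightning::ln::msgs::{OnionMessage, OnionMessageHandler};

// Flush every onion message currently buffered for one peer.
fn flush_onion_messages<H: OnionMessageHandler>(
	handler: &H, peer: PublicKey, deliver: impl Fn(OnionMessage),
) {
	while let Some(msg) = handler.next_onion_message_for_peer(peer) {
		deliver(msg);
	}
}
```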
+ fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option<OnionMessage>; /// Performs actions that should happen roughly every ten seconds after startup. Allows handlers /// to drop any buffered onion messages intended for prospective peers. fn timer_tick_occurred(&self); - - // Handler information: - /// Gets the node feature flags which this handler itself supports. All available handlers are - /// queried similarly and their feature flags are OR'd together to form the [`NodeFeatures`] - /// which are broadcasted in our [`NodeAnnouncement`] message. - fn provided_node_features(&self) -> NodeFeatures; - - /// Gets the init feature flags which should be sent to the given peer. All available handlers - /// are queried similarly and their feature flags are OR'd together to form the [`InitFeatures`] - /// which are sent in our [`Init`] message. - /// - /// Note that this method is called before [`Self::peer_connected`]. - fn provided_init_features(&self, their_node_id: PublicKey) -> InitFeatures; } #[derive(Clone, Debug, PartialEq, Eq)] diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 1a7a63a0681..55d2497bf8c 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -47,11 +47,11 @@ use crate::blinded_path::IntroductionNode; use crate::blinded_path::message::BlindedMessagePath; use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentContext}; use crate::blinded_path::message::{MessageContext, OffersContext}; -use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEventsProvider, PaymentFailureReason, PaymentPurpose}; +use crate::events::{ClosureReason, Event, HTLCDestination, PaymentFailureReason, PaymentPurpose}; use crate::ln::channelmanager::{Bolt12PaymentError, MAX_SHORT_LIVED_RELATIVE_EXPIRY, PaymentId, RecentPaymentDetails, RecipientOnionFields, Retry, self}; use crate::types::features::Bolt12InvoiceFeatures; use crate::ln::functional_test_utils::*; -use crate::ln::msgs::{ChannelMessageHandler, Init, NodeAnnouncement, OnionMessage, OnionMessageHandler, RoutingMessageHandler, SocketAddress, UnsignedGossipMessage, UnsignedNodeAnnouncement}; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, Init, NodeAnnouncement, OnionMessage, OnionMessageHandler, RoutingMessageHandler, SocketAddress, UnsignedGossipMessage, UnsignedNodeAnnouncement}; use crate::ln::outbound_payment::IDEMPOTENCY_TIMEOUT_TICKS; use crate::offers::invoice::Bolt12Invoice; use crate::offers::invoice_error::InvoiceError; diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index cee8bd1d301..f23cfaacdb0 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -13,7 +13,7 @@ use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::sign::{EntropySource, NodeSigner, Recipient}; -use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentFailureReason}; +use crate::events::{Event, HTLCDestination, PathFailure, PaymentFailureReason}; use crate::types::payment::{PaymentHash, PaymentSecret}; use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS; use crate::ln::channelmanager::{HTLCForwardInfo, FailureCode, CLTV_FAR_FAR_AWAY, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA, PendingAddHTLCInfo, PendingHTLCInfo, PendingHTLCRouting, PaymentId, RecipientOnionFields}; @@ -23,7 +23,10
@@ use crate::routing::router::{get_route, PaymentParameters, Route, RouteParameter use crate::types::features::{InitFeatures, Bolt11InvoiceFeatures}; use crate::ln::functional_test_utils::test_default_channel_config; use crate::ln::msgs; -use crate::ln::msgs::{ChannelMessageHandler, ChannelUpdate, FinalOnionHopData, OutboundOnionPayload, OutboundTrampolinePayload}; +use crate::ln::msgs::{ + BaseMessageHandler, ChannelMessageHandler, ChannelUpdate, FinalOnionHopData, + OutboundOnionPayload, OutboundTrampolinePayload, MessageSendEvent, +}; use crate::ln::wire::Encode; use crate::util::ser::{Writeable, Writer, BigSize}; use crate::util::test_utils; diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 66322cf107e..5adc2d66b11 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -14,7 +14,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen}; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::sign::EntropySource; -use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentFailureReason, PaymentPurpose}; +use crate::events::{ClosureReason, Event, HTLCDestination, PathFailure, PaymentFailureReason, PaymentPurpose}; use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS, get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI}; use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo}; use crate::types::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures}; @@ -22,7 +22,7 @@ use crate::ln::msgs; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentHash, PaymentSecret, PaymentPreimage}; use crate::ln::chan_utils; -use crate::ln::msgs::ChannelMessageHandler; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; use crate::ln::onion_utils; use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, ProbeSendFailure, Retry, RetryableSendFailure}; use crate::routing::gossip::{EffectiveCapacity, RoutingFees}; diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 89d54a8cdcd..336ce40cf11 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -20,11 +20,10 @@ use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey}; use crate::blinded_path::message::{AsyncPaymentsContext, DNSResolverContext, OffersContext}; use crate::sign::{NodeSigner, Recipient}; -use crate::events::{MessageSendEvent, MessageSendEventsProvider}; use crate::ln::types::ChannelId; use crate::types::features::{InitFeatures, NodeFeatures}; use crate::ln::msgs; -use crate::ln::msgs::{ChannelMessageHandler, Init, LightningError, SocketAddress, OnionMessageHandler, RoutingMessageHandler}; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, Init, LightningError, SocketAddress, MessageSendEvent, OnionMessageHandler, RoutingMessageHandler}; use crate::util::ser::{VecWriter, Writeable, Writer}; use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, MessageBuf, MSG_BUF_ALLOC_SIZE}; use crate::ln::wire; @@ -110,7 +109,14 @@ pub trait CustomMessageHandler: wire::CustomMessageReader { /// A dummy struct which implements `RoutingMessageHandler` without storing any routing information /// or doing any processing. 
You can provide one of these as the route_handler in a MessageHandler. pub struct IgnoringMessageHandler{} -impl MessageSendEventsProvider for IgnoringMessageHandler { +impl BaseMessageHandler for IgnoringMessageHandler { + fn peer_disconnected(&self, _their_node_id: PublicKey) {} + fn peer_connected(&self, _their_node_id: PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) } + fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() } + fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures { + InitFeatures::empty() + } + fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> { Vec::new() } } impl RoutingMessageHandler for IgnoringMessageHandler { @@ -120,29 +126,17 @@ impl RoutingMessageHandler for IgnoringMessageHandler { fn get_next_channel_announcement(&self, _starting_point: u64) -> Option<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> { None } fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option<msgs::NodeAnnouncement> { None } - fn peer_connected(&self, _their_node_id: PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) } - fn peer_disconnected(&self, _their_node_id: PublicKey) { } fn handle_reply_channel_range(&self, _their_node_id: PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), LightningError> { Ok(()) } fn handle_reply_short_channel_ids_end(&self, _their_node_id: PublicKey, _msg: msgs::ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) } fn handle_query_channel_range(&self, _their_node_id: PublicKey, _msg: msgs::QueryChannelRange) -> Result<(), LightningError> { Ok(()) } fn handle_query_short_channel_ids(&self, _their_node_id: PublicKey, _msg: msgs::QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) } - fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() } - fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures { - InitFeatures::empty() - } fn processing_queue_high(&self) -> bool { false } } impl OnionMessageHandler for IgnoringMessageHandler { fn handle_onion_message(&self, _their_node_id: PublicKey, _msg: &msgs::OnionMessage) {} fn next_onion_message_for_peer(&self, _peer_node_id: PublicKey) -> Option<msgs::OnionMessage> { None } - fn peer_connected(&self, _their_node_id: PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) } - fn peer_disconnected(&self, _their_node_id: PublicKey) {} fn timer_tick_occurred(&self) {} - fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() } - fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures { - InitFeatures::empty() - } } impl OffersMessageHandler for IgnoringMessageHandler { @@ -252,7 +246,31 @@ impl ErroringMessageHandler { }); } } -impl MessageSendEventsProvider for ErroringMessageHandler { +impl BaseMessageHandler for ErroringMessageHandler { + fn peer_disconnected(&self, _their_node_id: PublicKey) {} + fn peer_connected(&self, _their_node_id: PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) } + fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() } + fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures { + // Set a number of features which various nodes may require to talk to us. It's totally + // reasonable to indicate we "support" all kinds of channel features...we just reject all + // channels.
+ let mut features = InitFeatures::empty(); + features.set_data_loss_protect_optional(); + features.set_upfront_shutdown_script_optional(); + features.set_variable_length_onion_optional(); + features.set_static_remote_key_optional(); + features.set_payment_secret_optional(); + features.set_basic_mpp_optional(); + features.set_wumbo_optional(); + features.set_shutdown_any_segwit_optional(); + features.set_dual_fund_optional(); + features.set_channel_type_optional(); + features.set_scid_privacy_optional(); + features.set_zero_conf_optional(); + features.set_route_blinding_optional(); + features + } + fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> { let mut res = Vec::new(); mem::swap(&mut res, &mut self.message_queue.lock().unwrap()); @@ -327,32 +345,11 @@ impl ChannelMessageHandler for ErroringMessageHandler { } // msgs::ChannelUpdate does not contain the channel_id field, so we just drop them. fn handle_channel_update(&self, _their_node_id: PublicKey, _msg: &msgs::ChannelUpdate) {} + fn handle_peer_storage(&self, _their_node_id: PublicKey, _msg: msgs::PeerStorage) {} fn handle_peer_storage_retrieval(&self, _their_node_id: PublicKey, _msg: msgs::PeerStorageRetrieval) {} - fn peer_disconnected(&self, _their_node_id: PublicKey) {} - fn peer_connected(&self, _their_node_id: PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) } + fn handle_error(&self, _their_node_id: PublicKey, _msg: &msgs::ErrorMessage) {} - fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() } - fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures { - // Set a number of features which various nodes may require to talk to us. It's totally - // reasonable to indicate we "support" all kinds of channel features...we just reject all - // channels.
- let mut features = InitFeatures::empty(); - features.set_data_loss_protect_optional(); - features.set_upfront_shutdown_script_optional(); - features.set_variable_length_onion_optional(); - features.set_static_remote_key_optional(); - features.set_payment_secret_optional(); - features.set_basic_mpp_optional(); - features.set_wumbo_optional(); - features.set_shutdown_any_segwit_optional(); - features.set_dual_fund_optional(); - features.set_channel_type_optional(); - features.set_scid_privacy_optional(); - features.set_zero_conf_optional(); - features.set_route_blinding_optional(); - features - } fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> { // We don't enforce any chains upon peer connection for `ErroringMessageHandler` and leave it up @@ -2130,9 +2127,6 @@ impl { @@ -2489,13 +2483,22 @@ impl) { if addresses.len() > 100 { panic!("More than half the message size was taken up by public addresses!"); @@ -2809,7 +2812,6 @@ mod tests { use super::*; use crate::sign::{NodeSigner, Recipient}; - use crate::events; use crate::io; use crate::ln::types::ChannelId; use crate::types::features::{InitFeatures, NodeFeatures}; @@ -3097,7 +3099,7 @@ mod tests { if peers[0].read_event(&mut fd_a, &b_data).is_err() { break; } cfgs[0].chan_handler.pending_events.lock().unwrap() - .push(crate::events::MessageSendEvent::SendShutdown { + .push(MessageSendEvent::SendShutdown { node_id: peers[1].node_signer.get_node_id(Recipient::Node).unwrap(), msg: msgs::Shutdown { channel_id: ChannelId::new_zero(), @@ -3105,7 +3107,7 @@ mod tests { }, }); cfgs[1].chan_handler.pending_events.lock().unwrap() - .push(crate::events::MessageSendEvent::SendShutdown { + .push(MessageSendEvent::SendShutdown { node_id: peers[0].node_signer.get_node_id(Recipient::Node).unwrap(), msg: msgs::Shutdown { channel_id: ChannelId::new_zero(), @@ -3203,7 +3205,7 @@ mod tests { assert_eq!(peers[0].peers.read().unwrap().len(), 1); let their_id = peers[1].node_signer.get_node_id(Recipient::Node).unwrap(); - cfgs[0].chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::HandleError { + cfgs[0].chan_handler.pending_events.lock().unwrap().push(MessageSendEvent::HandleError { node_id: their_id, action: msgs::ErrorAction::DisconnectPeer { msg: None }, }); @@ -3226,7 +3228,7 @@ mod tests { let their_id = peers[1].node_signer.get_node_id(Recipient::Node).unwrap(); let msg = msgs::Shutdown { channel_id: ChannelId::from_bytes([42; 32]), scriptpubkey: bitcoin::ScriptBuf::new() }; - a_chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::SendShutdown { + a_chan_handler.pending_events.lock().unwrap().push(MessageSendEvent::SendShutdown { node_id: their_id, msg: msg.clone() }); peers[0].message_handler.chan_handler = &a_chan_handler; diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 97d3c68f9f6..a63e4264805 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -12,14 +12,14 @@ //! LSP).
use crate::chain::ChannelMonitorUpdateStatus; -use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider}; +use crate::events::{ClosureReason, Event, HTLCDestination}; use crate::ln::channelmanager::{MIN_CLTV_EXPIRY_DELTA, PaymentId, RecipientOnionFields}; use crate::routing::gossip::RoutingFees; use crate::routing::router::{PaymentParameters, RouteHint, RouteHintHop}; use crate::types::features::ChannelTypeFeatures; use crate::ln::msgs; use crate::ln::types::ChannelId; -use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction}; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, ErrorAction, MessageSendEvent}; use crate::util::config::{MaxDustHTLCExposure, UserConfig}; use crate::util::ser::Writeable; diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs index be4132ed629..15df7f3293e 100644 --- a/lightning/src/ln/quiescence_tests.rs +++ b/lightning/src/ln/quiescence_tests.rs @@ -1,14 +1,11 @@ use crate::chain::ChannelMonitorUpdateStatus; -use crate::events::Event; -use crate::events::HTLCDestination; -use crate::events::MessageSendEvent; -use crate::events::MessageSendEventsProvider; +use crate::events::{Event, HTLCDestination}; use crate::ln::channel::DISCONNECT_PEER_AWAITING_RESPONSE_TICKS; use crate::ln::channelmanager::PaymentId; use crate::ln::channelmanager::RecipientOnionFields; use crate::ln::functional_test_utils::*; use crate::ln::msgs; -use crate::ln::msgs::{ChannelMessageHandler, ErrorAction}; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent}; use crate::util::errors::APIError; use crate::util::test_channel_signer::SignerOp; diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 24d5b0987a9..7c3d544eb3b 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -14,11 +14,11 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateStep}; use crate::sign::EntropySource; use crate::chain::transaction::OutPoint; -use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider}; +use crate::events::{ClosureReason, Event, HTLCDestination}; use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields}; use crate::ln::msgs; use crate::ln::types::ChannelId; -use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction}; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, ErrorAction, MessageSendEvent}; use crate::util::test_channel_signer::TestChannelSigner; use crate::util::test_utils; use crate::util::errors::APIError; diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index b1b4f77c590..934ca0d5fdc 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -13,8 +13,8 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::chain::transaction::OutPoint; use crate::chain::Confirm; -use crate::events::{Event, MessageSendEventsProvider, ClosureReason, HTLCDestination, MessageSendEvent}; -use crate::ln::msgs::{ChannelMessageHandler, Init}; +use crate::events::{Event, ClosureReason, HTLCDestination}; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, Init, 
MessageSendEvent}; use crate::ln::types::ChannelId; use crate::sign::OutputSpender; use crate::util::ser::Writeable; diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index c068d7f12d6..f68a9843f58 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -13,13 +13,13 @@ use crate::sign::{EntropySource, SignerProvider}; use crate::chain::ChannelMonitorUpdateStatus; use crate::chain::transaction::OutPoint; -use crate::events::{Event, MessageSendEvent, HTLCDestination, MessageSendEventsProvider, ClosureReason}; +use crate::events::{Event, HTLCDestination, ClosureReason}; use crate::ln::channel_state::{ChannelDetails, ChannelShutdownState}; use crate::ln::channelmanager::{self, PaymentId, RecipientOnionFields, Retry}; use crate::routing::router::{PaymentParameters, get_route, RouteParameters}; use crate::ln::msgs; use crate::ln::types::ChannelId; -use crate::ln::msgs::{ChannelMessageHandler, ErrorAction}; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent}; use crate::ln::onion_utils::INVALID_ONION_BLINDING; use crate::ln::script::ShutdownScript; use crate::util::test_utils; diff --git a/lightning/src/onion_message/functional_tests.rs b/lightning/src/onion_message/functional_tests.rs index 239625de9e4..e007bb616d2 100644 --- a/lightning/src/onion_message/functional_tests.rs +++ b/lightning/src/onion_message/functional_tests.rs @@ -25,7 +25,7 @@ use crate::blinded_path::message::{ }; use crate::blinded_path::EmptyNodeIdLookUp; use crate::events::{Event, EventsProvider}; -use crate::ln::msgs::{self, DecodeError, OnionMessageHandler}; +use crate::ln::msgs::{self, BaseMessageHandler, DecodeError, OnionMessageHandler}; use crate::routing::gossip::{NetworkGraph, P2PGossipSync}; use crate::routing::test_utils::{add_channel, add_or_update_node}; use crate::sign::{NodeSigner, Recipient}; diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index ce5a37f1b31..df317d45df2 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -33,7 +33,9 @@ use crate::blinded_path::message::{ use crate::blinded_path::utils; use crate::blinded_path::{IntroductionNode, NodeIdLookUp}; use crate::events::{Event, EventHandler, EventsProvider, ReplayEvent}; -use crate::ln::msgs::{self, OnionMessage, OnionMessageHandler, SocketAddress}; +use crate::ln::msgs::{ + self, BaseMessageHandler, MessageSendEvent, OnionMessage, OnionMessageHandler, SocketAddress, +}; use crate::ln::onion_utils; use crate::routing::gossip::{NetworkGraph, NodeId, ReadOnlyNetworkGraph}; use crate::sign::{EntropySource, NodeSigner, Recipient}; @@ -1796,6 +1798,78 @@ where } } +impl< + ES: Deref, + NS: Deref, + L: Deref, + NL: Deref, + MR: Deref, + OMH: Deref, + APH: Deref, + DRH: Deref, + CMH: Deref, + > BaseMessageHandler for OnionMessenger<ES, NS, L, NL, MR, OMH, APH, DRH, CMH> +where + ES::Target: EntropySource, + NS::Target: NodeSigner, + L::Target: Logger, + NL::Target: NodeIdLookUp, + MR::Target: MessageRouter, + OMH::Target: OffersMessageHandler, + APH::Target: AsyncPaymentsMessageHandler, + DRH::Target: DNSResolverMessageHandler, + CMH::Target: CustomOnionMessageHandler, +{ + fn provided_node_features(&self) -> NodeFeatures { + let mut features = NodeFeatures::empty(); + features.set_onion_messages_optional(); + features | self.dns_resolver_handler.provided_node_features() + } + + fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures { + let mut features =
InitFeatures::empty(); + features.set_onion_messages_optional(); + features + } + + fn peer_connected( + &self, their_node_id: PublicKey, init: &msgs::Init, _inbound: bool, + ) -> Result<(), ()> { + if init.features.supports_onion_messages() { + { + let mut message_recipients = self.message_recipients.lock().unwrap(); + message_recipients + .entry(their_node_id) + .or_insert_with(|| OnionMessageRecipient::ConnectedPeer(VecDeque::new())) + .mark_connected(); + } + if self.intercept_messages_for_offline_peers { + let mut pending_peer_connected_events = + self.pending_peer_connected_events.lock().unwrap(); + pending_peer_connected_events + .push(Event::OnionMessagePeerConnected { peer_node_id: their_node_id }); + self.event_notifier.notify(); + } + } else { + self.message_recipients.lock().unwrap().remove(&their_node_id); + } + + Ok(()) + } + + fn peer_disconnected(&self, their_node_id: PublicKey) { + match self.message_recipients.lock().unwrap().remove(&their_node_id) { + Some(OnionMessageRecipient::ConnectedPeer(..)) => {}, + Some(_) => debug_assert!(false), + None => {}, + } + } + + fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> { + Vec::new() + } +} + impl< ES: Deref, NS: Deref, @@ -1972,39 +2046,6 @@ where } } - fn peer_connected( - &self, their_node_id: PublicKey, init: &msgs::Init, _inbound: bool, - ) -> Result<(), ()> { - if init.features.supports_onion_messages() { - { - let mut message_recipients = self.message_recipients.lock().unwrap(); - message_recipients - .entry(their_node_id) - .or_insert_with(|| OnionMessageRecipient::ConnectedPeer(VecDeque::new())) - .mark_connected(); - } - if self.intercept_messages_for_offline_peers { - let mut pending_peer_connected_events = - self.pending_peer_connected_events.lock().unwrap(); - pending_peer_connected_events - .push(Event::OnionMessagePeerConnected { peer_node_id: their_node_id }); - self.event_notifier.notify(); - } - } else { - self.message_recipients.lock().unwrap().remove(&their_node_id); - } - - Ok(()) - } - - fn peer_disconnected(&self, their_node_id: PublicKey) { - match self.message_recipients.lock().unwrap().remove(&their_node_id) { - Some(OnionMessageRecipient::ConnectedPeer(..)) => {}, - Some(_) => debug_assert!(false), - None => {}, - } - } - fn timer_tick_occurred(&self) { let mut message_recipients = self.message_recipients.lock().unwrap(); @@ -2025,18 +2066,6 @@ where } } - fn provided_node_features(&self) -> NodeFeatures { - let mut features = NodeFeatures::empty(); - features.set_onion_messages_optional(); - features | self.dns_resolver_handler.provided_node_features() - } - - fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures { - let mut features = InitFeatures::empty(); - features.set_onion_messages_optional(); - features - } - // Before returning any messages to send for the peer, this method will see if any messages were // enqueued in the handler by users, find a path to the corresponding blinded path's introduction // node, and then enqueue the message for sending to the first peer in the full path.
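To keep the shape of this refactor in view before the next file, here is a minimal, self-contained sketch of the consolidated handler surface the hunks above implement. Everything in it is a simplified stand-in for the real LDK types (`PublicKey`, `msgs::Init`, the feature structs, `MessageSendEvent` are all placeholders), so it compiles on its own and should not be read as the library's actual API:

```rust
// Simplified stand-ins for the real LDK types, kept only so this sketch is self-contained.
#[derive(Clone, Copy, Debug)]
pub struct PublicKey([u8; 33]);
pub struct Init; // stand-in for `msgs::Init`
#[derive(Default)]
pub struct NodeFeatures;
#[derive(Default)]
pub struct InitFeatures;
pub enum MessageSendEvent {} // variants elided

/// Everything shared by the per-protocol handler traits: connection lifecycle,
/// feature advertisement, and draining queued outbound messages (the role the
/// old standalone `MessageSendEventsProvider` used to play).
pub trait BaseMessageHandler {
    fn peer_connected(&self, their_node_id: PublicKey, init: &Init, inbound: bool)
        -> Result<(), ()>;
    fn peer_disconnected(&self, their_node_id: PublicKey);
    fn provided_node_features(&self) -> NodeFeatures;
    fn provided_init_features(&self, their_node_id: PublicKey) -> InitFeatures;
    fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent>;
}

/// A no-op implementation in the spirit of `IgnoringMessageHandler`.
pub struct Ignoring;

impl BaseMessageHandler for Ignoring {
    fn peer_connected(&self, _: PublicKey, _: &Init, _: bool) -> Result<(), ()> {
        Ok(()) // never reject a peer
    }
    fn peer_disconnected(&self, _: PublicKey) {}
    fn provided_node_features(&self) -> NodeFeatures {
        NodeFeatures::default()
    }
    fn provided_init_features(&self, _: PublicKey) -> InitFeatures {
        InitFeatures::default()
    }
    fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
        Vec::new() // nothing is ever queued
    }
}

fn main() {
    let handler = Ignoring;
    assert!(handler.peer_connected(PublicKey([2; 33]), &Init, true).is_ok());
    assert!(handler.get_and_clear_pending_msg_events().is_empty());
}
```

The point of the consolidation is visible here: the connection lifecycle, feature flags, and outbound-event queue live on one trait, so `ChannelMessageHandler`, `RoutingMessageHandler`, and `OnionMessageHandler` can each simply require `BaseMessageHandler` as a supertrait instead of re-declaring the same five methods.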
diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index e47f577c0b4..91f6869067b 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -21,17 +21,17 @@ use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hashes::Hash; use bitcoin::network::Network; -use crate::events::{MessageSendEvent, MessageSendEventsProvider}; use crate::ln::msgs; use crate::ln::msgs::{ - ChannelAnnouncement, ChannelUpdate, GossipTimestampFilter, NodeAnnouncement, + BaseMessageHandler, ChannelAnnouncement, ChannelUpdate, GossipTimestampFilter, NodeAnnouncement, }; use crate::ln::msgs::{ DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress, MAX_VALUE_MSAT, }; use crate::ln::msgs::{ - QueryChannelRange, QueryShortChannelIds, ReplyChannelRange, ReplyShortChannelIdsEnd, + MessageSendEvent, QueryChannelRange, QueryShortChannelIds, ReplyChannelRange, + ReplyShortChannelIdsEnd, }; use crate::ln::types::ChannelId; use crate::routing::utxo::{self, UtxoLookup, UtxoResolver}; @@ -602,112 +602,6 @@ where None } - /// Initiates a stateless sync of routing gossip information with a peer - /// using [`gossip_queries`]. The default strategy used by this implementation - /// is to sync the full block range with several peers. - /// - /// We should expect one or more [`reply_channel_range`] messages in response - /// to our [`query_channel_range`]. Each reply will enqueue a [`query_scid`] message - /// to request gossip messages for each channel. The sync is considered complete - /// when the final [`reply_scids_end`] message is received, though we are not - /// tracking this directly. - /// - /// [`gossip_queries`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#query-messages - /// [`reply_channel_range`]: msgs::ReplyChannelRange - /// [`query_channel_range`]: msgs::QueryChannelRange - /// [`query_scid`]: msgs::QueryShortChannelIds - /// [`reply_scids_end`]: msgs::ReplyShortChannelIdsEnd - fn peer_connected( - &self, their_node_id: PublicKey, init_msg: &Init, _inbound: bool, - ) -> Result<(), ()> { - // We will only perform a sync with peers that support gossip_queries. - if !init_msg.features.supports_gossip_queries() { - // Don't disconnect peers for not supporting gossip queries. We may wish to have - // channels with peers even without being able to exchange gossip. - return Ok(()); - } - - // The lightning network's gossip sync system is completely broken in numerous ways. - // - // Given no broadly-available set-reconciliation protocol, the only reasonable approach is - // to do a full sync from the first few peers we connect to, and then receive gossip - // updates from all our peers normally. - // - // Originally, we could simply tell a peer to dump us the entire gossip table on startup, - // wasting lots of bandwidth but ensuring we have the full network graph. After the initial - // dump peers would always send gossip and we'd stay up-to-date with whatever our peer has - // seen. - // - // In order to reduce the bandwidth waste, "gossip queries" were introduced, allowing you - // to ask for the SCIDs of all channels in your peer's routing graph, and then only request - // channel data which you are missing. Except there was no way at all to identify which - // `channel_update`s you were missing, so you still had to request everything, just in a - // very complicated way with some queries instead of just getting the dump. 
- // - // Later, an option was added to fetch the latest timestamps of the `channel_update`s to - // make efficient sync possible, however it has yet to be implemented in lnd, which makes - // relying on it useless. - // - // After gossip queries were introduced, support for receiving a full gossip table dump on - // connection was removed from several nodes, making it impossible to get a full sync - // without using the "gossip queries" messages. - // - // Once you opt into "gossip queries", the only way to receive any gossip updates that a - // peer receives after you connect is to send a `gossip_timestamp_filter` message. This - // message, as the name implies, tells the peer to not forward any gossip messages with a - // timestamp older than a given value (not the time the peer received the filter, but the - // timestamp in the update message, which is often hours behind when the peer received the - // message). - // - // Obnoxiously, `gossip_timestamp_filter` isn't *just* a filter, but it's also a request for - // your peer to send you the full routing graph (subject to the filter). Thus, in order to - // tell a peer to send you any updates as it sees them, you have to also ask for the full - // routing graph to be synced. If you set a timestamp filter near the current time, peers - // will simply not forward any new updates they see to you which were generated some time - // ago (which is not uncommon). If you instead set a timestamp filter near 0 (or two weeks - // ago), you will always get the full routing graph from all your peers. - // - // Most lightning nodes today opt to simply turn off receiving gossip data which only - // propagated some time after it was generated, and, worse, often disable gossiping with - // several peers after their first connection. The second behavior can cause gossip to not - // propagate fully if there are cuts in the gossiping subgraph. - // - // In an attempt to strike a middle ground between always fetching the full graph from all of - // our peers and never receiving gossip from peers at all, we send all of our peers a - // `gossip_timestamp_filter`, with the filter time set either two weeks ago or an hour ago. - // - // For non-`std` builds, we bury our head in the sand and do a full sync on each connection. - #[allow(unused_mut, unused_assignments)] - let mut gossip_start_time = 0; - #[allow(unused)] - let should_sync = self.should_request_full_sync(); - #[cfg(feature = "std")] - { - gossip_start_time = SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("Time must be > 1970") - .as_secs(); - if should_sync { - gossip_start_time -= 60 * 60 * 24 * 7 * 2; // 2 weeks ago - } else { - gossip_start_time -= 60 * 60; // an hour ago - } - } - - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(MessageSendEvent::SendGossipTimestampFilter { - node_id: their_node_id.clone(), - msg: GossipTimestampFilter { - chain_hash: self.network_graph.chain_hash, - first_timestamp: gossip_start_time as u32, // 2106 issue!
- timestamp_range: u32::max_value(), - }, - }); - Ok(()) - } - - fn peer_disconnected(&self, _their_node_id: PublicKey) {} - fn handle_reply_channel_range( &self, _their_node_id: PublicKey, _msg: ReplyChannelRange, ) -> Result<(), LightningError> { @@ -855,6 +749,123 @@ where }) } + fn processing_queue_high(&self) -> bool { + self.network_graph.pending_checks.too_many_checks_pending() + } +} + +impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> BaseMessageHandler + for P2PGossipSync<G, U, L> +where + U::Target: UtxoLookup, + L::Target: Logger, +{ + /// Initiates a stateless sync of routing gossip information with a peer + /// using [`gossip_queries`]. The default strategy used by this implementation + /// is to sync the full block range with several peers. + /// + /// We should expect one or more [`reply_channel_range`] messages in response + /// to our [`query_channel_range`]. Each reply will enqueue a [`query_scid`] message + /// to request gossip messages for each channel. The sync is considered complete + /// when the final [`reply_scids_end`] message is received, though we are not + /// tracking this directly. + /// + /// [`gossip_queries`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#query-messages + /// [`reply_channel_range`]: msgs::ReplyChannelRange + /// [`query_channel_range`]: msgs::QueryChannelRange + /// [`query_scid`]: msgs::QueryShortChannelIds + /// [`reply_scids_end`]: msgs::ReplyShortChannelIdsEnd + fn peer_connected( + &self, their_node_id: PublicKey, init_msg: &Init, _inbound: bool, + ) -> Result<(), ()> { + // We will only perform a sync with peers that support gossip_queries. + if !init_msg.features.supports_gossip_queries() { + // Don't disconnect peers for not supporting gossip queries. We may wish to have + // channels with peers even without being able to exchange gossip. + return Ok(()); + } + + // The lightning network's gossip sync system is completely broken in numerous ways. + // + // Given no broadly-available set-reconciliation protocol, the only reasonable approach is + // to do a full sync from the first few peers we connect to, and then receive gossip + // updates from all our peers normally. + // + // Originally, we could simply tell a peer to dump us the entire gossip table on startup, + // wasting lots of bandwidth but ensuring we have the full network graph. After the initial + // dump peers would always send gossip and we'd stay up-to-date with whatever our peer has + // seen. + // + // In order to reduce the bandwidth waste, "gossip queries" were introduced, allowing you + // to ask for the SCIDs of all channels in your peer's routing graph, and then only request + // channel data which you are missing. Except there was no way at all to identify which + // `channel_update`s you were missing, so you still had to request everything, just in a + // very complicated way with some queries instead of just getting the dump. + // + // Later, an option was added to fetch the latest timestamps of the `channel_update`s to + // make efficient sync possible, however it has yet to be implemented in lnd, which makes + // relying on it useless. + // + // After gossip queries were introduced, support for receiving a full gossip table dump on + // connection was removed from several nodes, making it impossible to get a full sync + // without using the "gossip queries" messages. + // + // Once you opt into "gossip queries", the only way to receive any gossip updates that a + // peer receives after you connect is to send a `gossip_timestamp_filter` message.
This + // message, as the name implies, tells the peer to not forward any gossip messages with a + // timestamp older than a given value (not the time the peer received the filter, but the + // timestamp in the update message, which is often hours behind when the peer received the + // message). + // + // Obnoxiously, `gossip_timestamp_filter` isn't *just* a filter, but it's also a request for + // your peer to send you the full routing graph (subject to the filter). Thus, in order to + // tell a peer to send you any updates as it sees them, you have to also ask for the full + // routing graph to be synced. If you set a timestamp filter near the current time, peers + // will simply not forward any new updates they see to you which were generated some time + // ago (which is not uncommon). If you instead set a timestamp filter near 0 (or two weeks + // ago), you will always get the full routing graph from all your peers. + // + // Most lightning nodes today opt to simply turn off receiving gossip data which only + // propagated some time after it was generated, and, worse, often disable gossiping with + // several peers after their first connection. The second behavior can cause gossip to not + // propagate fully if there are cuts in the gossiping subgraph. + // + // In an attempt to strike a middle ground between always fetching the full graph from all of + // our peers and never receiving gossip from peers at all, we send all of our peers a + // `gossip_timestamp_filter`, with the filter time set either two weeks ago or an hour ago. + // + // For non-`std` builds, we bury our head in the sand and do a full sync on each connection. + #[allow(unused_mut, unused_assignments)] + let mut gossip_start_time = 0; + #[allow(unused)] + let should_sync = self.should_request_full_sync(); + #[cfg(feature = "std")] + { + gossip_start_time = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time must be > 1970") + .as_secs(); + if should_sync { + gossip_start_time -= 60 * 60 * 24 * 7 * 2; // 2 weeks ago + } else { + gossip_start_time -= 60 * 60; // an hour ago + } + } + + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push(MessageSendEvent::SendGossipTimestampFilter { + node_id: their_node_id.clone(), + msg: GossipTimestampFilter { + chain_hash: self.network_graph.chain_hash, + first_timestamp: gossip_start_time as u32, // 2106 issue!
+ timestamp_range: u32::max_value(), + }, + }); + Ok(()) + } + + fn peer_disconnected(&self, _their_node_id: PublicKey) {} + fn provided_node_features(&self) -> NodeFeatures { let mut features = NodeFeatures::empty(); features.set_gossip_queries_optional(); @@ -867,17 +878,6 @@ where features } - fn processing_queue_high(&self) -> bool { - self.network_graph.pending_checks.too_many_checks_pending() - } -} - -impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> MessageSendEventsProvider - for P2PGossipSync<G, U, L> -where - U::Target: UtxoLookup, - L::Target: Logger, -{ fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> { let mut ret = Vec::new(); let mut pending_events = self.pending_events.lock().unwrap(); @@ -2711,10 +2711,9 @@ impl ReadOnlyNetworkGraph<'_> { #[cfg(test)] pub(crate) mod tests { - use crate::events::{MessageSendEvent, MessageSendEventsProvider}; use crate::ln::chan_utils::make_funding_redeemscript; use crate::ln::channelmanager; - use crate::ln::msgs::SocketAddress; + use crate::ln::msgs::{BaseMessageHandler, MessageSendEvent, SocketAddress}; use crate::ln::msgs::{ ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, QueryChannelRange, QueryShortChannelIds, ReplyChannelRange, RoutingMessageHandler, diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs index 9ccbb20ab4b..43b469aacef 100644 --- a/lightning/src/routing/scoring.rs +++ b/lightning/src/routing/scoring.rs @@ -4080,13 +4080,12 @@ mod tests { pub mod benches { use super::*; use criterion::Criterion; - use crate::routing::router::{bench_utils, RouteHop}; + use crate::routing::router::bench_utils; use crate::util::test_utils::TestLogger; - use crate::types::features::{ChannelFeatures, NodeFeatures}; pub fn decay_100k_channel_bounds(bench: &mut Criterion) { let logger = TestLogger::new(); - let (network_graph, mut scorer) = bench_utils::read_graph_scorer(&logger).unwrap(); + let (_, mut scorer) = bench_utils::read_graph_scorer(&logger).unwrap(); let mut cur_time = Duration::ZERO; cur_time += Duration::from_millis(1); scorer.time_passed(cur_time); diff --git a/lightning/src/routing/utxo.rs b/lightning/src/routing/utxo.rs index c960846638a..bb7ec79a8c0 100644 --- a/lightning/src/routing/utxo.rs +++ b/lightning/src/routing/utxo.rs @@ -19,9 +19,8 @@ use bitcoin::constants::ChainHash; use bitcoin::hex::DisplayHex; -use crate::events::MessageSendEvent; use crate::ln::chan_utils::make_funding_redeemscript_from_slices; -use crate::ln::msgs::{self, LightningError, ErrorAction}; +use crate::ln::msgs::{self, LightningError, ErrorAction, MessageSendEvent}; use crate::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync}; use crate::util::logger::{Level, Logger}; diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 5d9f8b7a9dd..6f1f9d0862a 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -1063,8 +1063,9 @@ impl From<u64> for UpdateName { mod tests { use super::*; use crate::chain::ChannelMonitorUpdateStatus; - use crate::events::{ClosureReason, MessageSendEventsProvider}; + use crate::events::ClosureReason; use crate::ln::functional_test_utils::*; + use crate::ln::msgs::BaseMessageHandler; use crate::sync::Arc; use crate::util::test_channel_signer::TestChannelSigner; use crate::util::test_utils::{self, TestLogger, TestStore}; diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index cfe96666c15..657eb134922 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -21,13 +21,13 @@ use crate::chain::channelmonitor::{ }; use
crate::chain::transaction::OutPoint; use crate::chain::WatchedOutput; -use crate::events; use crate::events::bump_transaction::{Utxo, WalletSource}; #[cfg(any(test, feature = "_externalize_tests"))] use crate::ln::chan_utils::CommitmentTransaction; use crate::ln::channel_state::ChannelDetails; use crate::ln::channelmanager; use crate::ln::inbound_payment::ExpandedKey; +use crate::ln::msgs::{BaseMessageHandler, MessageSendEvent}; use crate::ln::script::ShutdownScript; use crate::ln::types::ChannelId; use crate::ln::{msgs, wire}; @@ -913,7 +913,7 @@ impl ConnectionTracker { } pub struct TestChannelMessageHandler { - pub pending_events: Mutex<Vec<events::MessageSendEvent>>, + pub pending_events: Mutex<Vec<MessageSendEvent>>, expected_recv_msgs: Mutex<Option<Vec<wire::Message<()>>>>, pub conn_tracker: ConnectionTracker, chain_hash: ChainHash, @@ -1041,25 +1041,9 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler { ) { self.received_msg(wire::Message::ChannelReestablish(msg.clone())); } - fn peer_disconnected(&self, their_node_id: PublicKey) { - self.conn_tracker.peer_disconnected(their_node_id) - } - fn peer_connected( - &self, their_node_id: PublicKey, _msg: &msgs::Init, _inbound: bool, - ) -> Result<(), ()> { - // Don't bother with `received_msg` for Init as it's auto-generated and we don't want to - // bother re-generating the expected Init message in all tests. - self.conn_tracker.peer_connected(their_node_id) - } fn handle_error(&self, _their_node_id: PublicKey, msg: &msgs::ErrorMessage) { self.received_msg(wire::Message::Error(msg.clone())); } - fn provided_node_features(&self) -> NodeFeatures { - channelmanager::provided_node_features(&UserConfig::default()) - } - fn provided_init_features(&self, _their_init_features: PublicKey) -> InitFeatures { - channelmanager::provided_init_features(&UserConfig::default()) - } fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> { Some(vec![self.chain_hash]) @@ -1122,8 +1106,24 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler { fn message_received(&self) {} } -impl events::MessageSendEventsProvider for TestChannelMessageHandler { - fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> { +impl msgs::BaseMessageHandler for TestChannelMessageHandler { + fn peer_disconnected(&self, their_node_id: PublicKey) { + self.conn_tracker.peer_disconnected(their_node_id) + } + fn peer_connected( + &self, their_node_id: PublicKey, _msg: &msgs::Init, _inbound: bool, + ) -> Result<(), ()> { + // Don't bother with `received_msg` for Init as it's auto-generated and we don't want to + // bother re-generating the expected Init message in all tests.
+ self.conn_tracker.peer_connected(their_node_id) + } + fn provided_node_features(&self) -> NodeFeatures { + channelmanager::provided_node_features(&UserConfig::default()) + } + fn provided_init_features(&self, _their_init_features: PublicKey) -> InitFeatures { + channelmanager::provided_init_features(&UserConfig::default()) + } + fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> { Self::MESSAGE_FETCH_COUNTER.with(|val| val.fetch_add(1, Ordering::AcqRel)); let mut pending_events = self.pending_events.lock().unwrap(); let mut ret = Vec::new(); @@ -1186,7 +1186,7 @@ fn get_dummy_channel_update(short_chan_id: u64) -> msgs::ChannelUpdate { pub struct TestRoutingMessageHandler { pub chan_upds_recvd: AtomicUsize, pub chan_anns_recvd: AtomicUsize, - pub pending_events: Mutex<Vec<events::MessageSendEvent>>, + pub pending_events: Mutex<Vec<MessageSendEvent>>, pub request_full_sync: AtomicBool, pub announcement_available_for_sync: AtomicBool, pub conn_tracker: ConnectionTracker, @@ -1244,6 +1244,36 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler { None } + fn handle_reply_channel_range( + &self, _their_node_id: PublicKey, _msg: msgs::ReplyChannelRange, + ) -> Result<(), msgs::LightningError> { + Ok(()) + } + + fn handle_reply_short_channel_ids_end( + &self, _their_node_id: PublicKey, _msg: msgs::ReplyShortChannelIdsEnd, + ) -> Result<(), msgs::LightningError> { + Ok(()) + } + + fn handle_query_channel_range( + &self, _their_node_id: PublicKey, _msg: msgs::QueryChannelRange, + ) -> Result<(), msgs::LightningError> { + Ok(()) + } + + fn handle_query_short_channel_ids( + &self, _their_node_id: PublicKey, _msg: msgs::QueryShortChannelIds, + ) -> Result<(), msgs::LightningError> { + Ok(()) + } + + fn processing_queue_high(&self) -> bool { + false + } +} + +impl BaseMessageHandler for TestRoutingMessageHandler { fn peer_connected( &self, their_node_id: PublicKey, init_msg: &msgs::Init, _inbound: bool, ) -> Result<(), ()> { @@ -1268,7 +1298,7 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler { } let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(events::MessageSendEvent::SendGossipTimestampFilter { + pending_events.push(MessageSendEvent::SendGossipTimestampFilter { node_id: their_node_id.clone(), msg: msgs::GossipTimestampFilter { chain_hash: ChainHash::using_genesis_block(Network::Testnet), @@ -1284,30 +1314,6 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler { self.conn_tracker.peer_disconnected(their_node_id); } - fn handle_reply_channel_range( - &self, _their_node_id: PublicKey, _msg: msgs::ReplyChannelRange, - ) -> Result<(), msgs::LightningError> { - Ok(()) - } - - fn handle_reply_short_channel_ids_end( - &self, _their_node_id: PublicKey, _msg: msgs::ReplyShortChannelIdsEnd, - ) -> Result<(), msgs::LightningError> { - Ok(()) - } - - fn handle_query_channel_range( - &self, _their_node_id: PublicKey, _msg: msgs::QueryChannelRange, - ) -> Result<(), msgs::LightningError> { - Ok(()) - } - - fn handle_query_short_channel_ids( - &self, _their_node_id: PublicKey, _msg: msgs::QueryShortChannelIds, - ) -> Result<(), msgs::LightningError> { - Ok(()) - } - fn provided_node_features(&self) -> NodeFeatures { let mut features = NodeFeatures::empty(); features.set_gossip_queries_optional(); @@ -1320,13 +1326,7 @@ features } - fn processing_queue_high(&self) -> bool { - false - } -} - -impl events::MessageSendEventsProvider for TestRoutingMessageHandler { - fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> { + fn
get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> { let mut ret = Vec::new(); let mut pending_events = self.pending_events.lock().unwrap(); core::mem::swap(&mut ret, &mut pending_events);
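Every `get_and_clear_pending_msg_events` body in this patch uses the same drain idiom: swap an empty `Vec` into the `Mutex`-guarded queue, so the whole backlog is taken in O(1) while the lock is held only briefly and no events are cloned. A self-contained sketch of that idiom follows; the `MessageSendEvent` struct here is our stand-in, not the LDK type:

```rust
use std::mem;
use std::sync::Mutex;

// Stand-in for the real MessageSendEvent enum, only so this sketch compiles on its own.
#[derive(Debug, PartialEq)]
struct MessageSendEvent(u32);

struct Handler {
    pending_events: Mutex<Vec<MessageSendEvent>>,
}

impl Handler {
    // Take the entire queue in O(1) by swapping a fresh Vec in under the lock,
    // rather than cloning the contents or popping items one at a time.
    fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
        let mut ret = Vec::new();
        let mut pending_events = self.pending_events.lock().unwrap();
        mem::swap(&mut ret, &mut *pending_events);
        ret
    }
}

fn main() {
    let handler = Handler {
        pending_events: Mutex::new(vec![MessageSendEvent(1), MessageSendEvent(2)]),
    };
    // The first drain returns everything that was queued...
    assert_eq!(handler.get_and_clear_pending_msg_events().len(), 2);
    // ...and leaves the queue empty for the next caller.
    assert!(handler.get_and_clear_pending_msg_events().is_empty());
}
```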