diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index d7a13d59e3d..d8a1a181b99 100644
--- a/lightning/src/events/mod.rs
+++ b/lightning/src/events/mod.rs
@@ -49,7 +49,7 @@ use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::Hash;
use bitcoin::script::ScriptBuf;
use bitcoin::secp256k1::PublicKey;
-use bitcoin::{OutPoint, Transaction};
+use bitcoin::{OutPoint, Transaction, TxOut};
use core::ops::Deref;

#[allow(unused_imports)]
@@ -1502,6 +1502,71 @@ pub enum Event {
/// [`ChainMonitor::get_claimable_balances`]: crate::chain::chainmonitor::ChainMonitor::get_claimable_balances
last_local_balance_msat: Option<u64>,
},
+ /// Used to indicate that a splice for the given `channel_id` has been negotiated and its
+ /// funding transaction has been broadcast.
+ ///
+ /// The splice is then considered pending until both parties have seen enough confirmations to
+ /// consider the funding locked. Once this occurs, an [`Event::ChannelReady`] will be emitted.
+ ///
+ /// Any UTXOs spent by the splice cannot be reused except by an RBF attempt for the same channel.
+ ///
+ /// # Failure Behavior and Persistence
+ /// This event will eventually be replayed after failures-to-handle (i.e., the event handler
+ /// returning `Err(ReplayEvent ())`) and will be persisted across restarts.
+ SplicePending {
+ /// The `channel_id` of the channel that has a pending splice funding transaction.
+ channel_id: ChannelId,
+ /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
+ /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
+ /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
+ /// `user_channel_id` will be randomized for an inbound channel.
+ ///
+ /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
+ /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
+ /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
+ user_channel_id: u128,
+ /// The `node_id` of the channel counterparty.
+ counterparty_node_id: PublicKey,
+ /// The outpoint of the channel's splice funding transaction.
+ new_funding_txo: OutPoint,
+ /// The features that this channel will operate with. Currently, these will be the same
+ /// features that the channel was opened with, but in the future splices may change them.
+ channel_type: ChannelTypeFeatures,
+ },
+ /// Used to indicate that a splice for the given `channel_id` has failed.
+ ///
+ /// This event may be emitted if a splice fails after it has been initiated but prior to signing
+ /// any negotiated funding transaction.
+ ///
+ /// Any UTXOs contributed as inputs to the funding transaction may be reused and will be
+ /// given in `contributed_inputs`.
+ ///
+ /// # Failure Behavior and Persistence
+ /// This event will eventually be replayed after failures-to-handle (i.e., the event handler
+ /// returning `Err(ReplayEvent ())`) and will be persisted across restarts.
+ SpliceFailed {
+ /// The `channel_id` of the channel for which the splice failed.
+ channel_id: ChannelId,
+ /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
+ /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
+ /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
+ /// `user_channel_id` will be randomized for an inbound channel.
+ ///
+ /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
+ /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
+ /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
+ user_channel_id: u128,
+ /// The `node_id` of the channel counterparty.
+ counterparty_node_id: PublicKey,
+ /// The outpoint of the channel's splice funding transaction, if one was created.
+ abandoned_funding_txo: Option<OutPoint>,
+ /// The features that this channel will operate with, if available.
+ channel_type: Option<ChannelTypeFeatures>,
+ /// UTXOs spent as inputs contributed to the splice transaction.
+ contributed_inputs: Vec<OutPoint>,
+ /// Outputs contributed to the splice transaction.
+ contributed_outputs: Vec<TxOut>,
+ },
/// Used to indicate to the user that they can abandon the funding transaction and recycle the
/// inputs for another purpose.
///
@@ -2228,6 +2293,42 @@ impl Writeable for Event {
// We never write out FundingTransactionReadyForSigning events as they will be regenerated when
// necessary.
},
+ &Event::SplicePending {
+ ref channel_id,
+ ref user_channel_id,
+ ref counterparty_node_id,
+ ref new_funding_txo,
+ ref channel_type,
+ } => {
+ 50u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (1, channel_id, required),
+ (3, channel_type, required),
+ (5, user_channel_id, required),
+ (7, counterparty_node_id, required),
+ (9, new_funding_txo, required),
+ });
+ },
+ &Event::SpliceFailed {
+ ref channel_id,
+ ref user_channel_id,
+ ref counterparty_node_id,
+ ref abandoned_funding_txo,
+ ref channel_type,
+ ref contributed_inputs,
+ ref contributed_outputs,
+ } => {
+ 52u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (1, channel_id, required),
+ (3, channel_type, option),
+ (5, user_channel_id, required),
+ (7, counterparty_node_id, required),
+ (9, abandoned_funding_txo, option),
+ (11, *contributed_inputs, optional_vec),
+ (13, *contributed_outputs, optional_vec),
+ });
+ },
// Note that, going forward, all new events must only write data inside of
// `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write
// data via `write_tlv_fields`.
@@ -2810,6 +2911,50 @@ impl MaybeReadable for Event {
47u8 => Ok(None),
// Note that we do not write a length-prefixed TLV for FundingTransactionReadyForSigning events.
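For reference, the new `SplicePending` / `SpliceFailed` variants reach applications through the normal event-handling path. A minimal consumer sketch follows (illustrative only, not part of this patch; it assumes nothing beyond the field types shown above):

use bitcoin::{OutPoint, TxOut};
use lightning::events::Event;

fn handle_splice_events(event: &Event) {
	match event {
		Event::SplicePending { channel_id, new_funding_txo, .. } => {
			// The splice funding transaction has been negotiated and broadcast. Keep the
			// spent UTXOs reserved until Event::ChannelReady (or Event::DiscardFunding).
			println!("Splice pending on {channel_id}, new funding outpoint {new_funding_txo}");
		},
		Event::SpliceFailed { channel_id, contributed_inputs, contributed_outputs, .. } => {
			// Negotiation failed before signing; contributed inputs and outputs are free
			// to be reused in a new funding attempt.
			let _reusable: (&Vec<OutPoint>, &Vec<TxOut>) = (contributed_inputs, contributed_outputs);
			println!("Splice failed on {channel_id}; {} input(s) released", contributed_inputs.len());
		},
		_ => {},
	}
}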
49u8 => Ok(None),
+ 50u8 => {
+ let mut f = || {
+ _init_and_read_len_prefixed_tlv_fields!(reader, {
+ (1, channel_id, required),
+ (3, channel_type, required),
+ (5, user_channel_id, required),
+ (7, counterparty_node_id, required),
+ (9, new_funding_txo, required),
+ });
+
+ Ok(Some(Event::SplicePending {
+ channel_id: channel_id.0.unwrap(),
+ user_channel_id: user_channel_id.0.unwrap(),
+ counterparty_node_id: counterparty_node_id.0.unwrap(),
+ new_funding_txo: new_funding_txo.0.unwrap(),
+ channel_type: channel_type.0.unwrap(),
+ }))
+ };
+ f()
+ },
+ 52u8 => {
+ let mut f = || {
+ _init_and_read_len_prefixed_tlv_fields!(reader, {
+ (1, channel_id, required),
+ (3, channel_type, option),
+ (5, user_channel_id, required),
+ (7, counterparty_node_id, required),
+ (9, abandoned_funding_txo, option),
+ (11, contributed_inputs, optional_vec),
+ (13, contributed_outputs, optional_vec),
+ });
+
+ Ok(Some(Event::SpliceFailed {
+ channel_id: channel_id.0.unwrap(),
+ user_channel_id: user_channel_id.0.unwrap(),
+ counterparty_node_id: counterparty_node_id.0.unwrap(),
+ abandoned_funding_txo,
+ channel_type,
+ contributed_inputs: contributed_inputs.unwrap_or_default(),
+ contributed_outputs: contributed_outputs.unwrap_or_default(),
+ }))
+ };
+ f()
+ },
// Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
// Version 0.0.100 failed to properly ignore odd types, possibly resulting in corrupt
// reads.
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index d66ddc97c45..cd95c27dd6d 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -920,6 +920,7 @@ pub(super) enum ChannelError {
Ignore(String),
Warn(String),
WarnAndDisconnect(String),
+ Abort(AbortReason),
Close((String, ClosureReason)),
SendError(String),
}
@@ -932,6 +933,7 @@ impl fmt::Debug for ChannelError {
&ChannelError::WarnAndDisconnect(ref e) => {
write!(f, "Disconnecting with warning: {}", e)
},
+ &ChannelError::Abort(ref reason) => write!(f, "Abort: {}", reason),
&ChannelError::Close((ref e, _)) => write!(f, "Close: {}", e),
&ChannelError::SendError(ref e) => write!(f, "Not Found: {}", e),
}
@@ -944,6 +946,7 @@ impl fmt::Display for ChannelError {
&ChannelError::Ignore(ref e) => write!(f, "{}", e),
&ChannelError::Warn(ref e) => write!(f, "{}", e),
&ChannelError::WarnAndDisconnect(ref e) => write!(f, "{}", e),
+ &ChannelError::Abort(ref reason) => write!(f, "{}", reason),
&ChannelError::Close((ref e, _)) => write!(f, "{}", e),
&ChannelError::SendError(ref e) => write!(f, "{}", e),
}
@@ -1188,6 +1191,17 @@ pub(crate) struct ShutdownResult {
pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
pub(crate) channel_funding_txo: Option<OutPoint>,
pub(crate) last_local_balance_msat: u64,
+ /// If a splice was in progress when the channel was shut down, this contains
+ /// the splice funding information for emitting a SpliceFailed event.
+ pub(crate) splice_funding_failed: Option<SpliceFundingFailed>,
+}
+
+/// The result of a peer disconnection.
+pub(crate) struct DisconnectResult {
+ pub(crate) is_resumable: bool,
+ /// If a splice was in progress when the peer disconnected, this contains
+ /// the splice funding information for emitting a SpliceFailed event.
+ pub(crate) splice_funding_failed: Option<SpliceFundingFailed>,
}
/// Tracks the transaction number, along with current and next commitment points.
@@ -1582,11 +1596,15 @@ where
/// Should be called when the peer is disconnected. Returns true if the channel can be resumed
/// when the peer reconnects (via [`Self::peer_connected_get_handshake`]).
If not, the channel /// must be immediately closed. - #[rustfmt::skip] - pub fn peer_disconnected_is_resumable(&mut self, logger: &L) -> bool where L::Target: Logger { - match &mut self.phase { + pub fn peer_disconnected_is_resumable(&mut self, logger: &L) -> DisconnectResult + where + L::Target: Logger, + { + let is_resumable = match &mut self.phase { ChannelPhase::Undefined => unreachable!(), - ChannelPhase::Funded(chan) => chan.remove_uncommitted_htlcs_and_mark_paused(logger).is_ok(), + ChannelPhase::Funded(chan) => { + chan.remove_uncommitted_htlcs_and_mark_paused(logger).is_ok() + }, // If we get disconnected and haven't yet committed to a funding // transaction, we can replay the `open_channel` on reconnection, so don't // bother dropping the channel here. However, if we already committed to @@ -1596,7 +1614,40 @@ where ChannelPhase::UnfundedOutboundV1(chan) => chan.is_resumable(), ChannelPhase::UnfundedInboundV1(_) => false, ChannelPhase::UnfundedV2(_) => false, - } + }; + + let splice_funding_failed = if let ChannelPhase::Funded(chan) = &mut self.phase { + // Reset any quiescence-related state as it is implicitly terminated once disconnected. + if matches!(chan.context.channel_state, ChannelState::ChannelReady(_)) { + if chan.quiescent_action.is_some() { + // If we were trying to get quiescent, try again after reconnection. + chan.context.channel_state.set_awaiting_quiescence(); + } + chan.context.channel_state.clear_local_stfu_sent(); + chan.context.channel_state.clear_remote_stfu_sent(); + if chan.should_reset_pending_splice_state() { + // If there was a pending splice negotiation that failed due to disconnecting, we + // also take the opportunity to clean up our state. + let splice_funding_failed = chan.reset_pending_splice_state(); + debug_assert!(!chan.context.channel_state.is_quiescent()); + splice_funding_failed + } else if !chan.has_pending_splice_awaiting_signatures() { + // We shouldn't be quiescent anymore upon reconnecting if: + // - We were in quiescence but a splice/RBF was never negotiated or + // - We were in quiescence but the splice negotiation failed due to disconnecting + chan.context.channel_state.clear_quiescent(); + None + } else { + None + } + } else { + None + } + } else { + None + }; + + DisconnectResult { is_resumable, splice_funding_failed } } /// Should be called when the peer re-connects, returning an initial message which we should @@ -1680,110 +1731,132 @@ where fn fail_interactive_tx_negotiation( &mut self, reason: AbortReason, logger: &L, - ) -> msgs::TxAbort + ) -> (ChannelError, Option) where L::Target: Logger, { let logger = WithChannelContext::from(logger, &self.context(), None); log_info!(logger, "Failed interactive transaction negotiation: {reason}"); - match &mut self.phase { + let splice_funding_failed = match &mut self.phase { ChannelPhase::Undefined => unreachable!(), - ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {}, + ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => None, ChannelPhase::UnfundedV2(pending_v2_channel) => { pending_v2_channel.interactive_tx_constructor.take(); + None }, ChannelPhase::Funded(funded_channel) => { if funded_channel.should_reset_pending_splice_state() { - funded_channel.reset_pending_splice_state(); + funded_channel.reset_pending_splice_state() } else { debug_assert!(false, "We should never fail an interactive funding negotiation once we're exchanging tx_signatures"); + None } }, }; - reason.into_tx_abort_msg(self.context().channel_id) + 
(ChannelError::Abort(reason), splice_funding_failed) } pub fn tx_add_input( &mut self, msg: &msgs::TxAddInput, logger: &L, - ) -> Result + ) -> Result)> where L::Target: Logger, { match self.interactive_tx_constructor_mut() { - Some(interactive_tx_constructor) => interactive_tx_constructor.handle_tx_add_input(msg), - None => Err(AbortReason::InternalError( - "Received unexpected interactive transaction negotiation message", + Some(interactive_tx_constructor) => interactive_tx_constructor + .handle_tx_add_input(msg) + .map_err(|reason| self.fail_interactive_tx_negotiation(reason, logger)), + None => Err(( + ChannelError::WarnAndDisconnect( + "Received unexpected interactive transaction negotiation message".to_owned(), + ), + None, )), } - .map_err(|abort_reason| self.fail_interactive_tx_negotiation(abort_reason, logger)) } pub fn tx_add_output( &mut self, msg: &msgs::TxAddOutput, logger: &L, - ) -> Result + ) -> Result)> where L::Target: Logger, { match self.interactive_tx_constructor_mut() { - Some(interactive_tx_constructor) => { - interactive_tx_constructor.handle_tx_add_output(msg) - }, - None => Err(AbortReason::InternalError( - "Received unexpected interactive transaction negotiation message", + Some(interactive_tx_constructor) => interactive_tx_constructor + .handle_tx_add_output(msg) + .map_err(|reason| self.fail_interactive_tx_negotiation(reason, logger)), + None => Err(( + ChannelError::WarnAndDisconnect( + "Received unexpected interactive transaction negotiation message".to_owned(), + ), + None, )), } - .map_err(|abort_reason| self.fail_interactive_tx_negotiation(abort_reason, logger)) } pub fn tx_remove_input( &mut self, msg: &msgs::TxRemoveInput, logger: &L, - ) -> Result + ) -> Result)> where L::Target: Logger, { match self.interactive_tx_constructor_mut() { - Some(interactive_tx_constructor) => { - interactive_tx_constructor.handle_tx_remove_input(msg) - }, - None => Err(AbortReason::InternalError( - "Received unexpected interactive transaction negotiation message", + Some(interactive_tx_constructor) => interactive_tx_constructor + .handle_tx_remove_input(msg) + .map_err(|reason| self.fail_interactive_tx_negotiation(reason, logger)), + None => Err(( + ChannelError::WarnAndDisconnect( + "Received unexpected interactive transaction negotiation message".to_owned(), + ), + None, )), } - .map_err(|abort_reason| self.fail_interactive_tx_negotiation(abort_reason, logger)) } pub fn tx_remove_output( &mut self, msg: &msgs::TxRemoveOutput, logger: &L, - ) -> Result + ) -> Result)> where L::Target: Logger, { match self.interactive_tx_constructor_mut() { - Some(interactive_tx_constructor) => { - interactive_tx_constructor.handle_tx_remove_output(msg) - }, - None => Err(AbortReason::InternalError( - "Received unexpected interactive transaction negotiation message", + Some(interactive_tx_constructor) => interactive_tx_constructor + .handle_tx_remove_output(msg) + .map_err(|reason| self.fail_interactive_tx_negotiation(reason, logger)), + None => Err(( + ChannelError::WarnAndDisconnect( + "Received unexpected interactive transaction negotiation message".to_owned(), + ), + None, )), } - .map_err(|abort_reason| self.fail_interactive_tx_negotiation(abort_reason, logger)) } pub fn tx_complete( &mut self, msg: &msgs::TxComplete, logger: &L, - ) -> Result<(Option, Option), msgs::TxAbort> + ) -> Result< + (Option, Option), + (ChannelError, Option), + > where L::Target: Logger, { let tx_complete_action = match self.interactive_tx_constructor_mut() { - Some(interactive_tx_constructor) => 
interactive_tx_constructor.handle_tx_complete(msg), - None => Err(AbortReason::InternalError( - "Received unexpected interactive transaction negotiation message", - )), - } - .map_err(|abort_reason| self.fail_interactive_tx_negotiation(abort_reason, logger))?; + Some(interactive_tx_constructor) => interactive_tx_constructor + .handle_tx_complete(msg) + .map_err(|reason| self.fail_interactive_tx_negotiation(reason, logger))?, + None => { + return Err(( + ChannelError::WarnAndDisconnect( + "Received unexpected interactive transaction negotiation message" + .to_owned(), + ), + None, + )) + }, + }; let (interactive_tx_msg_send, negotiation_complete) = match tx_complete_action { HandleTxCompleteValue::SendTxMessage(interactive_tx_msg_send) => { @@ -1809,7 +1882,7 @@ where pub fn tx_abort( &mut self, msg: &msgs::TxAbort, logger: &L, - ) -> Result, ChannelError> + ) -> Result<(Option, Option), ChannelError> where L::Target: Logger, { @@ -1818,14 +1891,16 @@ where // https://github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L560-L561 // For rationale why we echo back `tx_abort`: // https://github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L578-L580 - let should_ack = match &mut self.phase { + let (should_ack, splice_funding_failed) = match &mut self.phase { ChannelPhase::Undefined => unreachable!(), ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => { let err = "Got an unexpected tx_abort message: This is an unfunded channel created with V1 channel establishment"; return Err(ChannelError::Warn(err.into())); }, ChannelPhase::UnfundedV2(pending_v2_channel) => { - pending_v2_channel.interactive_tx_constructor.take().is_some() + let had_constructor = + pending_v2_channel.interactive_tx_constructor.take().is_some(); + (had_constructor, None) }, ChannelPhase::Funded(funded_channel) => { if funded_channel.has_pending_splice_awaiting_signatures() { @@ -1834,18 +1909,23 @@ where )); } if funded_channel.should_reset_pending_splice_state() { - let has_funding_negotiation = funded_channel.reset_pending_splice_state(); + let has_funding_negotiation = funded_channel + .pending_splice + .as_ref() + .map(|pending_splice| pending_splice.funding_negotiation.is_some()) + .unwrap_or(false); debug_assert!(has_funding_negotiation); - true + let splice_funding_failed = funded_channel.reset_pending_splice_state(); + (true, splice_funding_failed) } else { // We were not tracking the pending funding negotiation state anymore, likely // due to a disconnection or already having sent our own `tx_abort`. 
- false + (false, None) } }, }; - Ok(should_ack.then(|| { + let tx_abort = should_ack.then(|| { let logger = WithChannelContext::from(logger, &self.context(), None); let reason = types::string::UntrustedString(String::from_utf8_lossy(&msg.data).to_string()); @@ -1854,7 +1934,9 @@ where channel_id: msg.channel_id, data: "Acknowledged tx_abort".to_string().into_bytes(), } - })) + }); + + Ok((tx_abort, splice_funding_failed)) } #[rustfmt::skip] @@ -1931,7 +2013,8 @@ where interactive_tx_constructor, } = funding_negotiation { - Some((funding, interactive_tx_constructor)) + let is_initiator = interactive_tx_constructor.is_initiator(); + Some((is_initiator, funding, interactive_tx_constructor)) } else { // Replace the taken state for later error handling pending_splice.funding_negotiation = Some(funding_negotiation); @@ -1943,7 +2026,7 @@ where "Got a tx_complete message in an invalid state", ) }) - .and_then(|(mut funding, interactive_tx_constructor)| { + .and_then(|(is_initiator, mut funding, interactive_tx_constructor)| { match chan.context.funding_tx_constructed( &mut funding, funding_outpoint, @@ -1954,7 +2037,10 @@ where Ok(commitment_signed) => { // Advance the state pending_splice.funding_negotiation = - Some(FundingNegotiation::AwaitingSignatures { funding }); + Some(FundingNegotiation::AwaitingSignatures { + is_initiator, + funding, + }); Ok((interactive_tx_constructor, commitment_signed)) }, Err(e) => { @@ -2558,12 +2644,14 @@ enum FundingNegotiation { }, AwaitingSignatures { funding: FundingScope, + is_initiator: bool, }, } impl_writeable_tlv_based_enum_upgradable!(FundingNegotiation, (0, AwaitingSignatures) => { (1, funding, required), + (3, is_initiator, required), }, unread_variants: AwaitingAck, ConstructingTransaction ); @@ -2573,7 +2661,17 @@ impl FundingNegotiation { match self { FundingNegotiation::AwaitingAck { .. } => None, FundingNegotiation::ConstructingTransaction { funding, .. } => Some(funding), - FundingNegotiation::AwaitingSignatures { funding } => Some(funding), + FundingNegotiation::AwaitingSignatures { funding, .. } => Some(funding), + } + } + + fn is_initiator(&self) -> bool { + match self { + FundingNegotiation::AwaitingAck { context } => context.is_initiator, + FundingNegotiation::ConstructingTransaction { interactive_tx_constructor, .. } => { + interactive_tx_constructor.is_initiator() + }, + FundingNegotiation::AwaitingSignatures { is_initiator, .. 
} => *is_initiator,
}
}
}
@@ -2636,6 +2734,15 @@ pub(crate) struct SpliceInstructions {
locktime: u32,
}
+impl SpliceInstructions {
+ fn into_contributed_inputs_and_outputs(self) -> (Vec<bitcoin::OutPoint>, Vec<TxOut>) {
+ (
+ self.our_funding_inputs.into_iter().map(|input| input.utxo.outpoint).collect(),
+ self.our_funding_outputs,
+ )
+ }
+}
+
impl_writeable_tlv_based!(SpliceInstructions, {
(1, adjusted_funding_contribution, required),
(3, our_funding_inputs, required_vec),
@@ -5990,6 +6097,7 @@ where
is_manual_broadcast: self.is_manual_broadcast,
channel_funding_txo: funding.get_funding_txo(),
last_local_balance_msat: funding.value_to_self_msat,
+ splice_funding_failed: None,
}
}
@@ -6614,12 +6722,22 @@ impl FundingNegotiationContext {
}
fn into_negotiation_error(self, reason: AbortReason) -> NegotiationError {
+ let (contributed_inputs, contributed_outputs) = self.into_contributed_inputs_and_outputs();
+ NegotiationError { reason, contributed_inputs, contributed_outputs }
+ }
+
+ fn into_contributed_inputs_and_outputs(self) -> (Vec<bitcoin::OutPoint>, Vec<TxOut>) {
let contributed_inputs =
self.our_funding_inputs.into_iter().map(|input| input.utxo.outpoint).collect();
- let contributed_outputs = self.our_funding_outputs;
+ (contributed_inputs, contributed_outputs)
+ }
- NegotiationError { reason, contributed_inputs, contributed_outputs }
+ fn to_contributed_inputs_and_outputs(&self) -> (Vec<bitcoin::OutPoint>, Vec<TxOut>) {
+ let contributed_inputs =
+ self.our_funding_inputs.iter().map(|input| input.utxo.outpoint).collect();
+ let contributed_outputs = self.our_funding_outputs.clone();
+ (contributed_inputs, contributed_outputs)
}
}
@@ -6718,6 +6836,81 @@ type BestBlockUpdatedRes = (
Option,
);
+/// The result of signing a funding transaction negotiated using the interactive-tx protocol.
+pub struct FundingTxSigned {
+ /// Signatures that should be sent to the counterparty, if necessary.
+ pub tx_signatures: Option<msgs::TxSignatures>,
+
+ /// The fully-signed funding transaction to be broadcast.
+ pub funding_tx: Option<Transaction>,
+
+ /// Information about the completed funding negotiation.
+ pub splice_negotiated: Option<SpliceFundingNegotiated>,
+}
+
+/// Information about a splice funding negotiation that has been completed.
+pub struct SpliceFundingNegotiated {
+ /// The outpoint of the channel's splice funding transaction.
+ pub funding_txo: bitcoin::OutPoint,
+
+ /// The features that this channel will operate with.
+ pub channel_type: ChannelTypeFeatures,
+}
+
+/// Information about a splice funding negotiation that has failed.
+pub struct SpliceFundingFailed {
+ /// The outpoint of the channel's splice funding transaction, if one was created.
+ pub funding_txo: Option<bitcoin::OutPoint>,
+
+ /// The features that this channel will operate with, if available.
+ pub channel_type: Option<ChannelTypeFeatures>,
+
+ /// UTXOs spent as inputs contributed to the splice transaction.
+ pub contributed_inputs: Vec<bitcoin::OutPoint>,
+
+ /// Outputs contributed to the splice transaction.
+ pub contributed_outputs: Vec<TxOut>,
+}
+
+macro_rules!
maybe_create_splice_funding_failed { + ($pending_splice: expr, $get: ident, $contributed_inputs_and_outputs: ident) => {{ + $pending_splice + .and_then(|pending_splice| pending_splice.funding_negotiation.$get()) + .filter(|funding_negotiation| funding_negotiation.is_initiator()) + .map(|funding_negotiation| { + let funding_txo = funding_negotiation + .as_funding() + .and_then(|funding| funding.get_funding_txo()) + .map(|txo| txo.into_bitcoin_outpoint()); + + let channel_type = funding_negotiation + .as_funding() + .map(|funding| funding.get_channel_type().clone()); + + let (contributed_inputs, contributed_outputs) = match funding_negotiation { + FundingNegotiation::AwaitingAck { context } => { + context.$contributed_inputs_and_outputs() + }, + FundingNegotiation::ConstructingTransaction { + interactive_tx_constructor, + .. + } => interactive_tx_constructor.$contributed_inputs_and_outputs(), + FundingNegotiation::AwaitingSignatures { .. } => { + debug_assert!(false); + (Vec::new(), Vec::new()) + }, + }; + + SpliceFundingFailed { + funding_txo, + channel_type, + contributed_inputs, + contributed_outputs, + } + }) + }}; +} + pub struct SpliceFundingPromotion { pub funding_txo: OutPoint, pub monitor_update: Option, @@ -6735,7 +6928,40 @@ where } pub fn force_shutdown(&mut self, closure_reason: ClosureReason) -> ShutdownResult { - self.context.force_shutdown(&self.funding, closure_reason) + let splice_funding_failed = self.maybe_fail_splice_negotiation(); + + let mut shutdown_result = self.context.force_shutdown(&self.funding, closure_reason); + shutdown_result.splice_funding_failed = splice_funding_failed; + shutdown_result + } + + fn maybe_fail_splice_negotiation(&mut self) -> Option { + if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) { + if self.should_reset_pending_splice_state() { + self.reset_pending_splice_state() + } else { + match self.quiescent_action.take() { + Some(QuiescentAction::Splice(instructions)) => { + self.context.channel_state.clear_awaiting_quiescence(); + let (inputs, outputs) = instructions.into_contributed_inputs_and_outputs(); + Some(SpliceFundingFailed { + funding_txo: None, + channel_type: None, + contributed_inputs: inputs, + contributed_outputs: outputs, + }) + }, + #[cfg(any(test, fuzzing))] + Some(quiescent_action) => { + self.quiescent_action = Some(quiescent_action); + None + }, + None => None, + } + } + } else { + None + } } fn interactive_tx_constructor_mut(&mut self) -> Option<&mut InteractiveTxConstructor> { @@ -6792,19 +7018,34 @@ where .unwrap_or(false) } - fn reset_pending_splice_state(&mut self) -> bool { + fn reset_pending_splice_state(&mut self) -> Option { debug_assert!(self.should_reset_pending_splice_state()); debug_assert!(self.context.interactive_tx_signing_session.is_none()); self.context.channel_state.clear_quiescent(); - let has_funding_negotiation = self - .pending_splice - .as_mut() - .and_then(|pending_splice| pending_splice.funding_negotiation.take()) - .is_some(); + + let splice_funding_failed = maybe_create_splice_funding_failed!( + self.pending_splice.as_mut(), + take, + into_contributed_inputs_and_outputs + ); + if self.pending_funding().is_empty() { self.pending_splice.take(); } - has_funding_negotiation + + splice_funding_failed + } + + pub(super) fn maybe_splice_funding_failed(&self) -> Option { + if !self.should_reset_pending_splice_state() { + return None; + } + + maybe_create_splice_funding_failed!( + self.pending_splice.as_ref(), + as_ref, + to_contributed_inputs_and_outputs + ) } #[rustfmt::skip] @@ 
-8636,30 +8877,46 @@ where } } - fn on_tx_signatures_exchange(&mut self, funding_tx: Transaction) { + fn on_tx_signatures_exchange( + &mut self, funding_tx: Transaction, + ) -> Option { debug_assert!(!self.context.channel_state.is_monitor_update_in_progress()); debug_assert!(!self.context.channel_state.is_awaiting_remote_revoke()); if let Some(pending_splice) = self.pending_splice.as_mut() { - if let Some(FundingNegotiation::AwaitingSignatures { mut funding }) = + self.context.channel_state.clear_quiescent(); + if let Some(FundingNegotiation::AwaitingSignatures { mut funding, .. }) = pending_splice.funding_negotiation.take() { funding.funding_transaction = Some(funding_tx); + + let funding_txo = + funding.get_funding_txo().expect("funding outpoint should be set"); + let channel_type = funding.get_channel_type().clone(); + pending_splice.negotiated_candidates.push(funding); + + let splice_negotiated = SpliceFundingNegotiated { + funding_txo: funding_txo.into_bitcoin_outpoint(), + channel_type, + }; + + Some(splice_negotiated) } else { debug_assert!(false); + None } - self.context.channel_state.clear_quiescent(); } else { self.funding.funding_transaction = Some(funding_tx); self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); + None } } pub fn funding_transaction_signed( &mut self, funding_txid_signed: Txid, witnesses: Vec, - ) -> Result<(Option, Option), APIError> { + ) -> Result { let signing_session = if let Some(signing_session) = self.context.interactive_tx_signing_session.as_mut() { if let Some(pending_splice) = self.pending_splice.as_ref() { @@ -8676,7 +8933,11 @@ where if signing_session.holder_tx_signatures().is_some() { // Our `tx_signatures` either should've been the first time we processed them, // or we're waiting for our counterparty to send theirs first. - return Ok((None, None)); + return Ok(FundingTxSigned { + tx_signatures: None, + funding_tx: None, + splice_negotiated: None, + }); } signing_session @@ -8684,7 +8945,11 @@ where if Some(funding_txid_signed) == self.funding.get_funding_txid() { // We may be handling a duplicate call and the funding was already locked so we // no longer have the signing session present. 
- return Ok((None, None)); + return Ok(FundingTxSigned { + tx_signatures: None, + funding_tx: None, + splice_negotiated: None, + }); } let err = format!("Channel {} not expecting funding signatures", self.context.channel_id); @@ -8722,21 +8987,23 @@ where witnesses, shared_input_signature, }; - let (tx_signatures_opt, funding_tx_opt) = signing_session + let (tx_signatures, funding_tx) = signing_session .provide_holder_witnesses(tx_signatures, &self.context.secp_ctx) .map_err(|err| APIError::APIMisuseError { err })?; - if let Some(funding_tx) = funding_tx_opt.clone() { - debug_assert!(tx_signatures_opt.is_some()); - self.on_tx_signatures_exchange(funding_tx); - } + let splice_negotiated = if let Some(funding_tx) = funding_tx.clone() { + debug_assert!(tx_signatures.is_some()); + self.on_tx_signatures_exchange(funding_tx) + } else { + None + }; - Ok((tx_signatures_opt, funding_tx_opt)) + Ok(FundingTxSigned { tx_signatures, funding_tx, splice_negotiated }) } pub fn tx_signatures( &mut self, msg: &msgs::TxSignatures, - ) -> Result<(Option, Option), ChannelError> { + ) -> Result { let signing_session = if let Some(signing_session) = self.context.interactive_tx_signing_session.as_mut() { @@ -8779,14 +9046,16 @@ where } } - let (holder_tx_signatures_opt, funding_tx_opt) = + let (holder_tx_signatures, funding_tx) = signing_session.received_tx_signatures(msg).map_err(|msg| ChannelError::Warn(msg))?; - if let Some(funding_tx) = funding_tx_opt.clone() { - self.on_tx_signatures_exchange(funding_tx); - } + let splice_negotiated = if let Some(funding_tx) = funding_tx.clone() { + self.on_tx_signatures_exchange(funding_tx) + } else { + None + }; - Ok((holder_tx_signatures_opt, funding_tx_opt)) + Ok(FundingTxSigned { tx_signatures: holder_tx_signatures, funding_tx, splice_negotiated }) } /// Queues up an outbound update fee by placing it in the holding cell. You should call @@ -8935,27 +9204,6 @@ where } } - // Reset any quiescence-related state as it is implicitly terminated once disconnected. - if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) { - if self.quiescent_action.is_some() { - // If we were trying to get quiescent, try again after reconnection. - self.context.channel_state.set_awaiting_quiescence(); - } - self.context.channel_state.clear_local_stfu_sent(); - self.context.channel_state.clear_remote_stfu_sent(); - if self.should_reset_pending_splice_state() { - // If there was a pending splice negotiation that failed due to disconnecting, we - // also take the opportunity to clean up our state. - self.reset_pending_splice_state(); - debug_assert!(!self.context.channel_state.is_quiescent()); - } else if !self.has_pending_splice_awaiting_signatures() { - // We shouldn't be quiescent anymore upon reconnecting if: - // - We were in quiescence but a splice/RBF was never negotiated or - // - We were in quiescence but the splice negotiation failed due to disconnecting - self.context.channel_state.clear_quiescent(); - } - } - self.context.channel_state.set_peer_disconnected(); log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id()); Ok(()) @@ -9559,7 +9807,7 @@ where .as_ref() .and_then(|pending_splice| pending_splice.funding_negotiation.as_ref()) .and_then(|funding_negotiation| { - if let FundingNegotiation::AwaitingSignatures { funding } = &funding_negotiation { + if let FundingNegotiation::AwaitingSignatures { funding, .. 
} = &funding_negotiation { Some(funding) } else { None @@ -10221,6 +10469,7 @@ where is_manual_broadcast: self.context.is_manual_broadcast, channel_funding_txo: self.funding.get_funding_txo(), last_local_balance_msat: self.funding.value_to_self_msat, + splice_funding_failed: None, } } @@ -11627,9 +11876,9 @@ where .map_err(|e| APIError::APIMisuseError { err: e.to_owned() }) } - fn send_splice_init( - &mut self, instructions: SpliceInstructions, - ) -> Result { + fn send_splice_init(&mut self, instructions: SpliceInstructions) -> msgs::SpliceInit { + debug_assert!(self.pending_splice.is_none()); + let SpliceInstructions { adjusted_funding_contribution, our_funding_inputs, @@ -11639,15 +11888,6 @@ where locktime, } = instructions; - // Check if a splice has been initiated already. - // Note: only a single outstanding splice is supported (per spec) - if self.pending_splice.is_some() { - return Err(format!( - "Channel {} cannot be spliced, as it has already a splice pending", - self.context.channel_id(), - )); - } - let prev_funding_input = self.funding.to_splice_funding_input(); let context = FundingNegotiationContext { is_initiator: true, @@ -11671,14 +11911,40 @@ where let prev_funding_txid = self.funding.get_funding_txid(); let funding_pubkey = self.context.holder_pubkeys(prev_funding_txid).funding_pubkey; - Ok(msgs::SpliceInit { + msgs::SpliceInit { channel_id: self.context.channel_id, funding_contribution_satoshis: adjusted_funding_contribution.to_sat(), funding_feerate_per_kw, locktime, funding_pubkey, require_confirmed_inputs: None, - }) + } + } + + #[cfg(test)] + pub fn abandon_splice( + &mut self, + ) -> Result<(msgs::TxAbort, Option), APIError> { + if self.should_reset_pending_splice_state() { + let tx_abort = + msgs::TxAbort { channel_id: self.context.channel_id(), data: Vec::new() }; + let splice_funding_failed = self.reset_pending_splice_state(); + Ok((tx_abort, splice_funding_failed)) + } else if self.has_pending_splice_awaiting_signatures() { + Err(APIError::APIMisuseError { + err: format!( + "Channel {} splice cannot be abandoned; already awaiting signatures", + self.context.channel_id(), + ), + }) + } else { + Err(APIError::APIMisuseError { + err: format!( + "Channel {} splice cannot be abandoned; no pending splice", + self.context.channel_id(), + ), + }) + } } /// Checks during handling splice_init @@ -12823,10 +13089,20 @@ where "Internal Error: Didn't have anything to do after reaching quiescence".to_owned() )); }, - Some(QuiescentAction::Splice(_instructions)) => { - return self.send_splice_init(_instructions) - .map(|splice_init| Some(StfuResponse::SpliceInit(splice_init))) - .map_err(|e| ChannelError::WarnAndDisconnect(e.to_owned())); + Some(QuiescentAction::Splice(instructions)) => { + if self.pending_splice.is_some() { + self.quiescent_action = Some(QuiescentAction::Splice(instructions)); + + return Err(ChannelError::WarnAndDisconnect( + format!( + "Channel {} cannot be spliced as it already has a splice pending", + self.context.channel_id(), + ), + )); + } + + let splice_init = self.send_splice_init(instructions); + return Ok(Some(StfuResponse::SpliceInit(splice_init))); }, #[cfg(any(test, fuzzing))] Some(QuiescentAction::DoNothing) => { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 5cd5e80069b..86f020132a0 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -59,9 +59,10 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight; #[cfg(any(test, fuzzing))] use 
crate::ln::channel::QuiescentAction; use crate::ln::channel::{ - self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, FundedChannel, - InboundV1Channel, OutboundV1Channel, PendingV2Channel, ReconnectionMsg, ShutdownResult, - StfuResponse, UpdateFulfillCommitFetch, WithChannelContext, + self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult, + FundedChannel, FundingTxSigned, InboundV1Channel, OutboundV1Channel, PendingV2Channel, + ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch, + WithChannelContext, }; use crate::ln::channel_state::ChannelDetails; use crate::ln::funding::SpliceContribution; @@ -931,6 +932,7 @@ struct MsgHandleErrInternal { err: msgs::LightningError, closes_channel: bool, shutdown_finish: Option<(ShutdownResult, Option)>, + tx_abort: Option, } impl MsgHandleErrInternal { fn send_err_msg_no_close(err: String, channel_id: ChannelId) -> Self { @@ -943,11 +945,12 @@ impl MsgHandleErrInternal { }, closes_channel: false, shutdown_finish: None, + tx_abort: None, } } fn from_no_close(err: msgs::LightningError) -> Self { - Self { err, closes_channel: false, shutdown_finish: None } + Self { err, closes_channel: false, shutdown_finish: None, tx_abort: None } } fn from_finish_shutdown( @@ -967,10 +970,15 @@ impl MsgHandleErrInternal { err: LightningError { err, action }, closes_channel: true, shutdown_finish: Some((shutdown_res, channel_update)), + tx_abort: None, } } fn from_chan_no_close(err: ChannelError, channel_id: ChannelId) -> Self { + let tx_abort = match &err { + &ChannelError::Abort(reason) => Some(reason.into_tx_abort_msg(channel_id)), + _ => None, + }; let err = match err { ChannelError::Warn(msg) => LightningError { err: msg.clone(), @@ -988,6 +996,9 @@ impl MsgHandleErrInternal { ChannelError::Ignore(msg) => { LightningError { err: msg, action: msgs::ErrorAction::IgnoreError } }, + ChannelError::Abort(reason) => { + LightningError { err: reason.to_string(), action: msgs::ErrorAction::IgnoreError } + }, ChannelError::Close((msg, _)) | ChannelError::SendError(msg) => LightningError { err: msg.clone(), action: msgs::ErrorAction::SendErrorMessage { @@ -995,7 +1006,7 @@ impl MsgHandleErrInternal { }, }, }; - Self { err, closes_channel: false, shutdown_finish: None } + Self { err, closes_channel: false, shutdown_finish: None, tx_abort } } fn dont_send_error_message(&mut self) { @@ -3210,7 +3221,7 @@ macro_rules! handle_error { match $internal { Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => { + Err(MsgHandleErrInternal { err, shutdown_finish, tx_abort, .. }) => { let mut msg_event = None; if let Some((shutdown_res, update_option)) = shutdown_finish { @@ -3233,6 +3244,12 @@ macro_rules! handle_error { } if let msgs::ErrorAction::IgnoreError = err.action { + if let Some(tx_abort) = tx_abort { + msg_event = Some(MessageSendEvent::SendTxAbort { + node_id: $counterparty_node_id, + msg: tx_abort, + }); + } } else { msg_event = Some(MessageSendEvent::HandleError { node_id: $counterparty_node_id, @@ -3330,6 +3347,9 @@ macro_rules! 
convert_channel_err { ChannelError::Ignore(msg) => { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id)) }, + ChannelError::Abort(reason) => { + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Abort(reason), $channel_id)) + }, ChannelError::Close((msg, reason)) => { let (mut shutdown_res, chan_update) = $close(reason); let logger = WithChannelContext::from(&$self.logger, &$chan.context(), None); @@ -4516,6 +4536,18 @@ where last_local_balance_msat: Some(shutdown_res.last_local_balance_msat), }, None)); + if let Some(splice_funding_failed) = shutdown_res.splice_funding_failed.take() { + pending_events.push_back((events::Event::SpliceFailed { + channel_id: shutdown_res.channel_id, + counterparty_node_id: shutdown_res.counterparty_node_id, + user_channel_id: shutdown_res.user_channel_id, + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type, + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, None)); + } + if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx { let funding_info = if shutdown_res.is_manual_broadcast { FundingInfo::OutPoint { @@ -4631,17 +4663,33 @@ where } } - /// Initiate a splice, to change the channel capacity of an existing funded channel. - /// After completion of splicing, the funding transaction will be replaced by a new one, spending the old funding transaction, - /// with optional extra inputs (splice-in) and/or extra outputs (splice-out or change). - /// TODO(splicing): Implementation is currently incomplete. + /// Initiate a splice in order to add value to (splice-in) or remove value from (splice-out) + /// the channel. This will spend the channel's funding transaction output, effectively replacing + /// it with a new one. + /// + /// # Arguments + /// + /// Provide a `contribution` to determine if value is spliced in or out. The splice initiator is + /// responsible for paying fees for common fields, shared inputs, and shared outputs along with + /// any contributed inputs and outputs. Fees are determined using `funding_feerate_per_kw` and + /// must be covered by the supplied inputs for splice-in or the channel balance for splice-out. + /// + /// An optional `locktime` for the funding transaction may be specified. If not given, the + /// current best block height is used. /// - /// Note: Currently only splice-in is supported (increase in channel capacity), splice-out is not. + /// # Events /// - /// - `our_funding_contribution_satoshis`: the amount contributed by us to the channel. This will increase our channel balance. - /// - `our_funding_inputs`: the funding inputs provided by us. If our contribution is positive, our funding inputs must cover at least that amount. - /// Includes the witness weight for this input (e.g. P2WPKH_WITNESS_WEIGHT=109 for typical P2WPKH inputs). - /// - `locktime`: Optional locktime for the new funding transaction. If None, set to the current block height. + /// Once the funding transaction has been constructed, an [`Event::SplicePending`] will be + /// emitted. At this point, any inputs contributed to the splice can only be re-spent if an + /// [`Event::DiscardFunding`] is seen. + /// + /// If any failures occur while negotiating the funding transaction, an [`Event::SpliceFailed`] + /// will be emitted. Any contributed inputs no longer used will be included here and thus can + /// be re-spent. 
+ /// + /// Once the splice has been locked by both counterparties, an [`Event::ChannelReady`] will be + /// emitted with the new funding output. At this point, a new splice can be negotiated by + /// calling `splice_channel` again on this channel. #[rustfmt::skip] pub fn splice_channel( &self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, @@ -4654,7 +4702,7 @@ where ); res = result; match res { - Ok(_) => NotifyOption::SkipPersistHandleEvents, + Ok(_) => NotifyOption::DoPersist, Err(_) => NotifyOption::SkipPersistNoEvents, } }); @@ -4724,6 +4772,94 @@ where } } + #[cfg(test)] + pub(crate) fn abandon_splice( + &self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, + ) -> Result<(), APIError> { + let mut res = Ok(()); + PersistenceNotifierGuard::optionally_notify(self, || { + let result = self.internal_abandon_splice(channel_id, counterparty_node_id); + res = result; + match res { + Ok(_) => NotifyOption::SkipPersistHandleEvents, + Err(_) => NotifyOption::SkipPersistNoEvents, + } + }); + res + } + + #[cfg(test)] + fn internal_abandon_splice( + &self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, + ) -> Result<(), APIError> { + let per_peer_state = self.per_peer_state.read().unwrap(); + + let peer_state_mutex = match per_peer_state.get(counterparty_node_id).ok_or_else(|| { + APIError::ChannelUnavailable { + err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), + } + }) { + Ok(p) => p, + Err(e) => return Err(e), + }; + + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + + // Look for the channel + match peer_state.channel_by_id.entry(*channel_id) { + hash_map::Entry::Occupied(mut chan_phase_entry) => { + if !chan_phase_entry.get().context().is_connected() { + // TODO: We should probably support this, but right now `splice_channel` refuses when + // the peer is disconnected, so we just check it here. 
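In the same spirit, a rough caller-side sketch of the splice flow documented on `splice_channel` above. The exact public paths, the trailing parameters (`contribution`, `funding_feerate_per_kw`, `locktime`), and their order are not shown in this hunk and are assumed here purely for illustration:

use bitcoin::secp256k1::PublicKey;
use lightning::ln::channelmanager::AChannelManager;
use lightning::ln::funding::SpliceContribution;
use lightning::ln::types::ChannelId;
use lightning::util::errors::APIError;

fn initiate_splice<CM: AChannelManager>(
	cm: &CM, channel_id: ChannelId, counterparty_node_id: PublicKey,
	contribution: SpliceContribution, funding_feerate_per_kw: u32,
) -> Result<(), APIError> {
	// Assumed argument order, per the documentation above. On success, expect
	// Event::SplicePending once the funding transaction is negotiated and broadcast,
	// then Event::ChannelReady after confirmation, or Event::SpliceFailed on failure.
	cm.get_cm().splice_channel(
		&channel_id,
		&counterparty_node_id,
		contribution,
		funding_feerate_per_kw,
		None, // locktime; assumed to default to the current best block height when None
	)
}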
+ return Err(APIError::ChannelUnavailable { + err: "Cannot abandon splice while peer is disconnected".to_owned(), + }); + } + + if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() { + let (tx_abort, splice_funding_failed) = chan.abandon_splice()?; + + peer_state.pending_msg_events.push(MessageSendEvent::SendTxAbort { + node_id: *counterparty_node_id, + msg: tx_abort, + }); + + if let Some(splice_funding_failed) = splice_funding_failed { + let pending_events = &mut self.pending_events.lock().unwrap(); + pending_events.push_back(( + events::Event::SpliceFailed { + channel_id: *channel_id, + counterparty_node_id: *counterparty_node_id, + user_channel_id: chan.context.get_user_id(), + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type, + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, + None, + )); + } + + Ok(()) + } else { + Err(APIError::ChannelUnavailable { + err: format!( + "Channel with id {} is not funded, cannot abandon splice", + channel_id + ), + }) + } + }, + hash_map::Entry::Vacant(_) => Err(APIError::ChannelUnavailable { + err: format!( + "Channel with id {} not found for the passed counterparty node_id {}", + channel_id, counterparty_node_id, + ), + }), + } + } + #[rustfmt::skip] fn can_forward_htlc_to_outgoing_channel( &self, chan: &mut FundedChannel, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails @@ -6298,10 +6434,26 @@ where .filter(|witness| !witness.is_empty()) .collect(); match chan.funding_transaction_signed(txid, witnesses) { - Ok((Some(tx_signatures), funding_tx_opt)) => { - if let Some(funding_tx) = funding_tx_opt { + Ok(FundingTxSigned { + tx_signatures: Some(tx_signatures), + funding_tx, + splice_negotiated, + }) => { + if let Some(funding_tx) = funding_tx { self.broadcast_interactive_funding(chan, &funding_tx); } + if let Some(splice_negotiated) = splice_negotiated { + self.pending_events.lock().unwrap().push_back(( + events::Event::SplicePending { + channel_id: *channel_id, + counterparty_node_id: *counterparty_node_id, + user_channel_id: chan.context.get_user_id(), + new_funding_txo: splice_negotiated.funding_txo, + channel_type: splice_negotiated.channel_type, + }, + None, + )); + } peer_state.pending_msg_events.push( MessageSendEvent::SendTxSignatures { node_id: *counterparty_node_id, @@ -6314,7 +6466,13 @@ where result = Err(err); return NotifyOption::SkipPersistNoEvents; }, - _ => { + Ok(FundingTxSigned { + tx_signatures: None, + funding_tx, + splice_negotiated, + }) => { + debug_assert!(funding_tx.is_none()); + debug_assert!(splice_negotiated.is_none()); return NotifyOption::SkipPersistNoEvents; }, } @@ -9413,10 +9571,24 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } else { let txid = signing_session.unsigned_tx().compute_txid(); match channel.funding_transaction_signed(txid, vec![]) { - Ok((Some(tx_signatures), funding_tx_opt)) => { - if let Some(funding_tx) = funding_tx_opt { + Ok(FundingTxSigned { tx_signatures: Some(tx_signatures), funding_tx, splice_negotiated }) => { + if let Some(funding_tx) = funding_tx { self.broadcast_interactive_funding(channel, &funding_tx); } + + if let Some(splice_negotiated) = splice_negotiated { + self.pending_events.lock().unwrap().push_back(( + events::Event::SplicePending { + channel_id: channel.context.channel_id(), + counterparty_node_id, + user_channel_id: channel.context.get_user_id(), + new_funding_txo: splice_negotiated.funding_txo, + channel_type: splice_negotiated.channel_type, + }, + None, + )); + } + if channel.context.is_connected() { pending_msg_events.push(MessageSendEvent::SendTxSignatures { node_id: counterparty_node_id, @@ -9424,7 +9596,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }); } }, - Ok((None, _)) => { + Ok(FundingTxSigned { tx_signatures: None, .. }) => { debug_assert!(false, "If our tx_signatures is empty, then we should send it first!"); }, Err(err) => { @@ -10234,11 +10406,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } fn internal_tx_msg< - HandleTxMsgFn: Fn(&mut Channel) -> Result, + HandleTxMsgFn: Fn( + &mut Channel, + ) -> Result)>, >( &self, counterparty_node_id: &PublicKey, channel_id: ChannelId, tx_msg_handler: HandleTxMsgFn, - ) -> Result<(), MsgHandleErrInternal> { + ) -> Result { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { debug_assert!(false); @@ -10252,17 +10426,28 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ match peer_state.channel_by_id.entry(channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { let channel = chan_entry.get_mut(); - let msg_send_event = match tx_msg_handler(channel) { - Ok(msg_send) => msg_send.into_msg_send_event(*counterparty_node_id), - Err(tx_abort) => { - MessageSendEvent::SendTxAbort { - node_id: *counterparty_node_id, - msg: tx_abort, + match tx_msg_handler(channel) { + Ok(msg_send) => { + let msg_send_event = msg_send.into_msg_send_event(*counterparty_node_id); + peer_state.pending_msg_events.push(msg_send_event); + Ok(NotifyOption::SkipPersistHandleEvents) + }, + Err((error, splice_funding_failed)) => { + if let Some(splice_funding_failed) = splice_funding_failed { + let pending_events = &mut self.pending_events.lock().unwrap(); + pending_events.push_back((events::Event::SpliceFailed { + channel_id, + counterparty_node_id: *counterparty_node_id, + user_channel_id: channel.context().get_user_id(), + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type.clone(), + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, None)); } + Err(MsgHandleErrInternal::from_chan_no_close(error, channel_id)) }, - }; - peer_state.pending_msg_events.push(msg_send_event); - Ok(()) + } }, hash_map::Entry::Vacant(_) => { Err(MsgHandleErrInternal::send_err_msg_no_close(format!( @@ -10275,7 +10460,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ fn internal_tx_add_input( &self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput, - ) -> Result<(), MsgHandleErrInternal> { + ) -> Result { self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel| { channel.tx_add_input(msg, &self.logger) }) @@ -10283,7 +10468,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fn internal_tx_add_output( &self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput, - ) -> Result<(), MsgHandleErrInternal> { + ) -> Result { self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel| { channel.tx_add_output(msg, &self.logger) }) @@ -10291,7 +10476,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fn internal_tx_remove_input( &self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput, - ) -> Result<(), MsgHandleErrInternal> { + ) -> Result { self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel| { channel.tx_remove_input(msg, &self.logger) }) @@ -10299,14 +10484,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fn internal_tx_remove_output( &self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput, - ) -> Result<(), MsgHandleErrInternal> { + ) -> Result { self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel| { channel.tx_remove_output(msg, &self.logger) }) } #[rustfmt::skip] - fn internal_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) -> Result<(), MsgHandleErrInternal> { + fn internal_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) -> Result { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(&counterparty_node_id) .ok_or_else(|| { @@ -10322,6 +10507,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let chan = chan_entry.get_mut(); match chan.tx_complete(msg, &self.logger) { Ok((interactive_tx_msg_send, commitment_signed)) => { + let persist = if interactive_tx_msg_send.is_some() || commitment_signed.is_some() { + NotifyOption::SkipPersistHandleEvents + } else { + NotifyOption::SkipPersistNoEvents + }; if let Some(interactive_tx_msg_send) = interactive_tx_msg_send { let msg_send_event = interactive_tx_msg_send.into_msg_send_event(counterparty_node_id); peer_state.pending_msg_events.push(msg_send_event); @@ -10340,15 +10530,24 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ }, }); } + Ok(persist) }, - Err(tx_abort) => { - peer_state.pending_msg_events.push(MessageSendEvent::SendTxAbort { - node_id: counterparty_node_id, - msg: tx_abort, - }); + Err((error, splice_funding_failed)) => { + if let Some(splice_funding_failed) = splice_funding_failed { + let pending_events = &mut self.pending_events.lock().unwrap(); + pending_events.push_back((events::Event::SpliceFailed { + channel_id: msg.channel_id, + counterparty_node_id, + user_channel_id: chan.context().get_user_id(), + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type.clone(), + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, None)); + } + Err(MsgHandleErrInternal::from_chan_no_close(error, msg.channel_id)) }, } - Ok(()) }, hash_map::Entry::Vacant(_) => { Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) @@ -10373,20 +10572,33 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ hash_map::Entry::Occupied(mut chan_entry) => { match chan_entry.get_mut().as_funded_mut() { Some(chan) => { - let (tx_signatures_opt, funding_tx_opt) = try_channel_entry!(self, peer_state, chan.tx_signatures(msg), chan_entry); - if let Some(tx_signatures) = tx_signatures_opt { + let FundingTxSigned { tx_signatures, funding_tx, splice_negotiated } = + try_channel_entry!(self, peer_state, chan.tx_signatures(msg), chan_entry); + if let Some(tx_signatures) = tx_signatures { peer_state.pending_msg_events.push(MessageSendEvent::SendTxSignatures { node_id: *counterparty_node_id, msg: tx_signatures, }); } - if let Some(ref funding_tx) = funding_tx_opt { + if let Some(ref funding_tx) = funding_tx { self.tx_broadcaster.broadcast_transactions(&[funding_tx]); { let mut pending_events = self.pending_events.lock().unwrap(); emit_channel_pending_event!(pending_events, chan); } } + if let Some(splice_negotiated) = splice_negotiated { + self.pending_events.lock().unwrap().push_back(( + events::Event::SplicePending { + channel_id: msg.channel_id, + counterparty_node_id: *counterparty_node_id, + user_channel_id: chan.context.get_user_id(), + new_funding_txo: splice_negotiated.funding_txo, + channel_type: splice_negotiated.channel_type, + }, + None, + )); + } }, None => { let msg = "Got an unexpected tx_signatures message"; @@ -10405,7 +10617,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ #[rustfmt::skip] fn internal_tx_abort(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort) - -> Result<(), MsgHandleErrInternal> { + -> Result { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { @@ -10419,13 +10631,35 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { let res = chan_entry.get_mut().tx_abort(msg, &self.logger); - if let Some(msg) = try_channel_entry!(self, peer_state, res, chan_entry) { + let (tx_abort, splice_failed) = try_channel_entry!(self, peer_state, res, chan_entry); + + let persist = if tx_abort.is_some() || splice_failed.is_some() { + NotifyOption::DoPersist + } else { + NotifyOption::SkipPersistNoEvents + }; + + if let Some(tx_abort_msg) = tx_abort { peer_state.pending_msg_events.push(MessageSendEvent::SendTxAbort { node_id: *counterparty_node_id, - msg, + msg: tx_abort_msg, }); } - Ok(()) + + if let Some(splice_funding_failed) = splice_failed { + let pending_events = &mut self.pending_events.lock().unwrap(); + pending_events.push_back((events::Event::SpliceFailed { + channel_id: msg.channel_id, + counterparty_node_id: *counterparty_node_id, + user_channel_id: chan_entry.get().context().get_user_id(), + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type, + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, None)); + } + + Ok(persist) }, hash_map::Entry::Vacant(_) => { Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) @@ -11337,7 +11571,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } #[rustfmt::skip] - fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result { + fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> { let (inferred_splice_locked, need_lnd_workaround) = { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -11448,10 +11682,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(splice_locked) = inferred_splice_locked { self.internal_splice_locked(counterparty_node_id, &splice_locked)?; - return Ok(NotifyOption::DoPersist); } - Ok(NotifyOption::SkipPersistHandleEvents) + Ok(()) } /// Handle incoming splice request, transition channel to splice-pending (unless some check fails). @@ -13358,107 +13591,136 @@ where #[rustfmt::skip] fn peer_disconnected(&self, counterparty_node_id: PublicKey) { - let _persistence_guard = PersistenceNotifierGuard::optionally_notify( - self, || NotifyOption::SkipPersistHandleEvents); - let mut failed_channels: Vec<(Result, _)> = Vec::new(); - let mut per_peer_state = self.per_peer_state.write().unwrap(); - let remove_peer = { - log_debug!( - WithContext::from(&self.logger, Some(counterparty_node_id), None, None), - "Marking channels with {} disconnected and generating channel_updates.", - log_pubkey!(counterparty_node_id) - ); - if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; - let pending_msg_events = &mut peer_state.pending_msg_events; - peer_state.channel_by_id.retain(|_, chan| { - let logger = WithChannelContext::from(&self.logger, &chan.context(), None); - if chan.peer_disconnected_is_resumable(&&logger) { - return true; - } - // Clean up for removal. 
- let reason = ClosureReason::DisconnectedPeer; - let err = ChannelError::Close((reason.to_string(), reason)); - let (_, e) = convert_channel_err!(self, peer_state, err, chan); - failed_channels.push((Err(e), counterparty_node_id)); - false - }); - // Note that we don't bother generating any events for pre-accept channels - - // they're not considered "channels" yet from the PoV of our events interface. - peer_state.inbound_channel_request_by_id.clear(); - pending_msg_events.retain(|msg| { - match msg { - // V1 Channel Establishment - &MessageSendEvent::SendAcceptChannel { .. } => false, - &MessageSendEvent::SendOpenChannel { .. } => false, - &MessageSendEvent::SendFundingCreated { .. } => false, - &MessageSendEvent::SendFundingSigned { .. } => false, - // V2 Channel Establishment - &MessageSendEvent::SendAcceptChannelV2 { .. } => false, - &MessageSendEvent::SendOpenChannelV2 { .. } => false, - // Common Channel Establishment - &MessageSendEvent::SendChannelReady { .. } => false, - &MessageSendEvent::SendAnnouncementSignatures { .. } => false, - // Quiescence - &MessageSendEvent::SendStfu { .. } => false, - // Splicing - &MessageSendEvent::SendSpliceInit { .. } => false, - &MessageSendEvent::SendSpliceAck { .. } => false, - &MessageSendEvent::SendSpliceLocked { .. } => false, - // Interactive Transaction Construction - &MessageSendEvent::SendTxAddInput { .. } => false, - &MessageSendEvent::SendTxAddOutput { .. } => false, - &MessageSendEvent::SendTxRemoveInput { .. } => false, - &MessageSendEvent::SendTxRemoveOutput { .. } => false, - &MessageSendEvent::SendTxComplete { .. } => false, - &MessageSendEvent::SendTxSignatures { .. } => false, - &MessageSendEvent::SendTxInitRbf { .. } => false, - &MessageSendEvent::SendTxAckRbf { .. } => false, - &MessageSendEvent::SendTxAbort { .. } => false, - // Channel Operations - &MessageSendEvent::UpdateHTLCs { .. } => false, - &MessageSendEvent::SendRevokeAndACK { .. } => false, - &MessageSendEvent::SendClosingSigned { .. } => false, - &MessageSendEvent::SendClosingComplete { .. } => false, - &MessageSendEvent::SendClosingSig { .. } => false, - &MessageSendEvent::SendShutdown { .. } => false, - &MessageSendEvent::SendChannelReestablish { .. } => false, - &MessageSendEvent::HandleError { .. } => false, - // Gossip - &MessageSendEvent::SendChannelAnnouncement { .. } => false, - &MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, - // [`ChannelManager::pending_broadcast_events`] holds the [`BroadcastChannelUpdate`] - // This check here is to ensure exhaustivity. - &MessageSendEvent::BroadcastChannelUpdate { .. } => { - debug_assert!(false, "This event shouldn't have been here"); - false - }, - &MessageSendEvent::BroadcastNodeAnnouncement { .. } => true, - &MessageSendEvent::SendChannelUpdate { .. } => false, - &MessageSendEvent::SendChannelRangeQuery { .. } => false, - &MessageSendEvent::SendShortIdsQuery { .. } => false, - &MessageSendEvent::SendReplyChannelRange { .. } => false, - &MessageSendEvent::SendGossipTimestampFilter { .. } => false, - - // Peer Storage - &MessageSendEvent::SendPeerStorage { .. } => false, - &MessageSendEvent::SendPeerStorageRetrieval { .. 
} => false, - } - }); - debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect"); - peer_state.is_connected = false; - peer_state.ok_to_remove(true) - } else { debug_assert!(false, "Unconnected peer disconnected"); true } - }; - if remove_peer { - per_peer_state.remove(&counterparty_node_id); - } - mem::drop(per_peer_state); + let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { + let mut splice_failed_events = Vec::new(); + let mut failed_channels: Vec<(Result, _)> = Vec::new(); + let mut per_peer_state = self.per_peer_state.write().unwrap(); + let remove_peer = { + log_debug!( + WithContext::from(&self.logger, Some(counterparty_node_id), None, None), + "Marking channels with {} disconnected and generating channel_updates.", + log_pubkey!(counterparty_node_id) + ); + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + let pending_msg_events = &mut peer_state.pending_msg_events; + peer_state.channel_by_id.retain(|_, chan| { + let logger = WithChannelContext::from(&self.logger, &chan.context(), None); + let DisconnectResult { is_resumable, splice_funding_failed } = + chan.peer_disconnected_is_resumable(&&logger); + + if let Some(splice_funding_failed) = splice_funding_failed { + splice_failed_events.push(events::Event::SpliceFailed { + channel_id: chan.context().channel_id(), + counterparty_node_id, + user_channel_id: chan.context().get_user_id(), + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type, + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }); + } - for (err, counterparty_node_id) in failed_channels.drain(..) { - let _ = handle_error!(self, err, counterparty_node_id); - } + if is_resumable { + return true; + } + + // Clean up for removal. + let reason = ClosureReason::DisconnectedPeer; + let err = ChannelError::Close((reason.to_string(), reason)); + let (_, e) = convert_channel_err!(self, peer_state, err, chan); + failed_channels.push((Err(e), counterparty_node_id)); + false + }); + // Note that we don't bother generating any events for pre-accept channels - + // they're not considered "channels" yet from the PoV of our events interface. + peer_state.inbound_channel_request_by_id.clear(); + pending_msg_events.retain(|msg| { + match msg { + // V1 Channel Establishment + &MessageSendEvent::SendAcceptChannel { .. } => false, + &MessageSendEvent::SendOpenChannel { .. } => false, + &MessageSendEvent::SendFundingCreated { .. } => false, + &MessageSendEvent::SendFundingSigned { .. } => false, + // V2 Channel Establishment + &MessageSendEvent::SendAcceptChannelV2 { .. } => false, + &MessageSendEvent::SendOpenChannelV2 { .. } => false, + // Common Channel Establishment + &MessageSendEvent::SendChannelReady { .. } => false, + &MessageSendEvent::SendAnnouncementSignatures { .. } => false, + // Quiescence + &MessageSendEvent::SendStfu { .. } => false, + // Splicing + &MessageSendEvent::SendSpliceInit { .. } => false, + &MessageSendEvent::SendSpliceAck { .. } => false, + &MessageSendEvent::SendSpliceLocked { .. } => false, + // Interactive Transaction Construction + &MessageSendEvent::SendTxAddInput { .. } => false, + &MessageSendEvent::SendTxAddOutput { .. } => false, + &MessageSendEvent::SendTxRemoveInput { .. } => false, + &MessageSendEvent::SendTxRemoveOutput { .. 
} => false, + &MessageSendEvent::SendTxComplete { .. } => false, + &MessageSendEvent::SendTxSignatures { .. } => false, + &MessageSendEvent::SendTxInitRbf { .. } => false, + &MessageSendEvent::SendTxAckRbf { .. } => false, + &MessageSendEvent::SendTxAbort { .. } => false, + // Channel Operations + &MessageSendEvent::UpdateHTLCs { .. } => false, + &MessageSendEvent::SendRevokeAndACK { .. } => false, + &MessageSendEvent::SendClosingSigned { .. } => false, + &MessageSendEvent::SendClosingComplete { .. } => false, + &MessageSendEvent::SendClosingSig { .. } => false, + &MessageSendEvent::SendShutdown { .. } => false, + &MessageSendEvent::SendChannelReestablish { .. } => false, + &MessageSendEvent::HandleError { .. } => false, + // Gossip + &MessageSendEvent::SendChannelAnnouncement { .. } => false, + &MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, + // [`ChannelManager::pending_broadcast_events`] holds the [`BroadcastChannelUpdate`] + // This check here is to ensure exhaustivity. + &MessageSendEvent::BroadcastChannelUpdate { .. } => { + debug_assert!(false, "This event shouldn't have been here"); + false + }, + &MessageSendEvent::BroadcastNodeAnnouncement { .. } => true, + &MessageSendEvent::SendChannelUpdate { .. } => false, + &MessageSendEvent::SendChannelRangeQuery { .. } => false, + &MessageSendEvent::SendShortIdsQuery { .. } => false, + &MessageSendEvent::SendReplyChannelRange { .. } => false, + &MessageSendEvent::SendGossipTimestampFilter { .. } => false, + + // Peer Storage + &MessageSendEvent::SendPeerStorage { .. } => false, + &MessageSendEvent::SendPeerStorageRetrieval { .. } => false, + } + }); + debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect"); + peer_state.is_connected = false; + peer_state.ok_to_remove(true) + } else { debug_assert!(false, "Unconnected peer disconnected"); true } + }; + if remove_peer { + per_peer_state.remove(&counterparty_node_id); + } + mem::drop(per_peer_state); + + let persist = if splice_failed_events.is_empty() { + NotifyOption::SkipPersistHandleEvents + } else { + let mut pending_events = self.pending_events.lock().unwrap(); + for event in splice_failed_events { + pending_events.push_back((event, None)); + } + NotifyOption::DoPersist + }; + + for (err, counterparty_node_id) in failed_channels.drain(..) { + let _ = handle_error!(self, err, counterparty_node_id); + } + + persist + }); } #[rustfmt::skip] @@ -14570,16 +14832,9 @@ where fn handle_channel_reestablish( &self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReestablish, ) { - let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { - let res = self.internal_channel_reestablish(&counterparty_node_id, msg); - let persist = match &res { - Err(e) if e.closes_channel() => NotifyOption::DoPersist, - Err(_) => NotifyOption::SkipPersistHandleEvents, - Ok(persist) => *persist, - }; - let _ = handle_error!(self, res, counterparty_node_id); - persist - }); + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + let res = self.internal_channel_reestablish(&counterparty_node_id, msg); + let _ = handle_error!(self, res, counterparty_node_id); } #[rustfmt::skip] @@ -14700,57 +14955,62 @@ where } fn handle_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) { - // Note that we never need to persist the updated ChannelManager for an inbound - // tx_add_input message - interactive transaction construction does not need to - // be persisted before any signatures are exchanged. 
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_tx_add_input(counterparty_node_id, msg); + let persist = match &res { + Err(_) => NotifyOption::DoPersist, + Ok(persist) => *persist, + }; let _ = handle_error!(self, res, counterparty_node_id); - NotifyOption::SkipPersistHandleEvents + persist }); } fn handle_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) { - // Note that we never need to persist the updated ChannelManager for an inbound - // tx_add_output message - interactive transaction construction does not need to - // be persisted before any signatures are exchanged. let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_tx_add_output(counterparty_node_id, msg); + let persist = match &res { + Err(_) => NotifyOption::DoPersist, + Ok(persist) => *persist, + }; let _ = handle_error!(self, res, counterparty_node_id); - NotifyOption::SkipPersistHandleEvents + persist }); } fn handle_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) { - // Note that we never need to persist the updated ChannelManager for an inbound - // tx_remove_input message - interactive transaction construction does not need to - // be persisted before any signatures are exchanged. let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_tx_remove_input(counterparty_node_id, msg); + let persist = match &res { + Err(_) => NotifyOption::DoPersist, + Ok(persist) => *persist, + }; let _ = handle_error!(self, res, counterparty_node_id); - NotifyOption::SkipPersistHandleEvents + persist }); } fn handle_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) { - // Note that we never need to persist the updated ChannelManager for an inbound - // tx_remove_output message - interactive transaction construction does not need to - // be persisted before any signatures are exchanged. let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_tx_remove_output(counterparty_node_id, msg); + let persist = match &res { + Err(_) => NotifyOption::DoPersist, + Ok(persist) => *persist, + }; let _ = handle_error!(self, res, counterparty_node_id); - NotifyOption::SkipPersistHandleEvents + persist }); } fn handle_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) { - // Note that we never need to persist the updated ChannelManager for an inbound - // tx_complete message - interactive transaction construction does not need to - // be persisted before any signatures are exchanged. let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_tx_complete(counterparty_node_id, msg); + let persist = match &res { + Err(_) => NotifyOption::DoPersist, + Ok(persist) => *persist, + }; let _ = handle_error!(self, res, counterparty_node_id); - NotifyOption::SkipPersistHandleEvents + persist }); } @@ -14782,8 +15042,13 @@ where // be persisted before any signatures are exchanged. 
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || { let res = self.internal_tx_abort(&counterparty_node_id, msg); + let persist = match &res { + Err(e) if e.closes_channel() => NotifyOption::DoPersist, + Err(_) => NotifyOption::SkipPersistHandleEvents, + Ok(persist) => *persist, + }; let _ = handle_error!(self, res, counterparty_node_id); - NotifyOption::SkipPersistHandleEvents + persist }); } @@ -15893,7 +16158,32 @@ where } } - let events = self.pending_events.lock().unwrap(); + + // Since some FundingNegotiation variants are not persisted, any splice in such state must + // be failed upon reload. However, as the necessary information for the SpliceFailed event + // is not persisted, the event itself needs to be persisted even though it hasn't been + // emitted yet. These are removed after the events are written. + let mut events = self.pending_events.lock().unwrap(); + let event_count = events.len(); + for peer_state in peer_states.iter() { + for chan in peer_state.channel_by_id.values().filter_map(Channel::as_funded) { + if let Some(splice_funding_failed) = chan.maybe_splice_funding_failed() { + events.push_back(( + events::Event::SpliceFailed { + channel_id: chan.context.channel_id(), + counterparty_node_id: chan.context.get_counterparty_node_id(), + user_channel_id: chan.context.get_user_id(), + abandoned_funding_txo: splice_funding_failed.funding_txo, + channel_type: splice_funding_failed.channel_type, + contributed_inputs: splice_funding_failed.contributed_inputs, + contributed_outputs: splice_funding_failed.contributed_outputs, + }, + None, + )); + } + } + } + // LDK versions prior to 0.0.115 don't support post-event actions, thus if there's no // actions at all, skip writing the required TLV. Otherwise, pre-0.0.115 versions will // refuse to read the new ChannelManager. @@ -16010,6 +16300,9 @@ where (21, WithoutLength(&self.flow.writeable_async_receive_offer_cache()), required), }); + // Remove the SpliceFailed events added earlier. + events.truncate(event_count); + Ok(()) } } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 0bf60e4481d..ec3a7d07910 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1054,6 +1054,29 @@ pub fn get_err_msg(node: &Node, recipient: &PublicKey) -> msgs::ErrorMessage { } } +/// Get a warning message from the pending events queue. +pub fn get_warning_msg(node: &Node, recipient: &PublicKey) -> msgs::WarningMessage { + let events = node.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::HandleError { + action: msgs::ErrorAction::DisconnectPeerWithWarning { ref msg }, + ref node_id, + } => { + assert_eq!(node_id, recipient); + (*msg).clone() + }, + MessageSendEvent::HandleError { + action: msgs::ErrorAction::SendWarningMessage { ref msg, .. }, + ref node_id, + } => { + assert_eq!(node_id, recipient); + msg.clone() + }, + _ => panic!("Unexpected event"), + } +} + /// Get a specific event from the pending events queue. #[macro_export] macro_rules! 
get_event { @@ -2128,6 +2151,7 @@ pub struct ExpectedCloseEvent { pub channel_id: Option, pub counterparty_node_id: Option, pub discard_funding: bool, + pub splice_failed: bool, pub reason: Option, pub channel_funding_txo: Option, pub user_channel_id: Option, @@ -2142,6 +2166,7 @@ impl ExpectedCloseEvent { channel_id: Some(channel_id), counterparty_node_id: None, discard_funding, + splice_failed: false, reason: Some(reason), channel_funding_txo: None, user_channel_id: None, @@ -2153,8 +2178,14 @@ impl ExpectedCloseEvent { pub fn check_closed_events(node: &Node, expected_close_events: &[ExpectedCloseEvent]) { let closed_events_count = expected_close_events.len(); let discard_events_count = expected_close_events.iter().filter(|e| e.discard_funding).count(); + let splice_events_count = expected_close_events.iter().filter(|e| e.splice_failed).count(); let events = node.node.get_and_clear_pending_events(); - assert_eq!(events.len(), closed_events_count + discard_events_count, "{:?}", events); + assert_eq!( + events.len(), + closed_events_count + discard_events_count + splice_events_count, + "{:?}", + events + ); for expected_event in expected_close_events { assert!(events.iter().any(|e| matches!( e, @@ -2184,6 +2215,10 @@ pub fn check_closed_events(node: &Node, expected_close_events: &[ExpectedCloseEv events.iter().filter(|e| matches!(e, Event::DiscardFunding { .. },)).count(), discard_events_count ); + assert_eq!( + events.iter().filter(|e| matches!(e, Event::SpliceFailed { .. },)).count(), + splice_events_count + ); } /// Check that a channel's closing channel events has been issued @@ -2205,6 +2240,7 @@ pub fn check_closed_event( channel_id: None, counterparty_node_id: Some(*node_id), discard_funding: is_check_discard_funding, + splice_failed: false, reason: Some(expected_reason.clone()), channel_funding_txo: None, user_channel_id: None, @@ -3067,6 +3103,21 @@ pub fn expect_channel_ready_event<'a, 'b, 'c, 'd>( } } +#[cfg(any(test, ldk_bench, feature = "_test_utils"))] +pub fn expect_splice_pending_event<'a, 'b, 'c, 'd>( + node: &'a Node<'b, 'c, 'd>, expected_counterparty_node_id: &PublicKey, +) -> ChannelId { + let events = node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match &events[0] { + crate::events::Event::SplicePending { channel_id, counterparty_node_id, .. } => { + assert_eq!(*expected_counterparty_node_id, *counterparty_node_id); + *channel_id + }, + _ => panic!("Unexpected event"), + } +} + pub fn expect_probe_successful_events( node: &Node, mut probe_results: Vec<(PaymentHash, PaymentId)>, ) { diff --git a/lightning/src/ln/funding.rs b/lightning/src/ln/funding.rs index cc90e104aa1..db1d9169c56 100644 --- a/lightning/src/ln/funding.rs +++ b/lightning/src/ln/funding.rs @@ -204,6 +204,11 @@ impl FundingTxInput { FundingTxInput::new(prevtx, vout, Weight::ZERO, Script::is_p2pkh) } + /// The outpoint of the UTXO being spent. + pub fn outpoint(&self) -> bitcoin::OutPoint { + self.utxo.outpoint + } + /// The sequence number to use in the [`TxIn`]. 
 	///
 	/// [`TxIn`]: bitcoin::TxIn
diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs
index d1cac891c11..a912db02a20 100644
--- a/lightning/src/ln/interactivetxs.rs
+++ b/lightning/src/ln/interactivetxs.rs
@@ -345,6 +345,11 @@ impl ConstructedTransaction {
 	}

 	fn into_negotiation_error(self, reason: AbortReason) -> NegotiationError {
+		let (contributed_inputs, contributed_outputs) = self.into_contributed_inputs_and_outputs();
+		NegotiationError { reason, contributed_inputs, contributed_outputs }
+	}
+
+	fn into_contributed_inputs_and_outputs(self) -> (Vec<OutPoint>, Vec<TxOut>) {
 		let contributed_inputs = self
 			.tx
 			.input
@@ -371,7 +376,7 @@ impl ConstructedTransaction {
 			.map(|(_, (txout, _))| txout)
 			.collect();

-		NegotiationError { reason, contributed_inputs, contributed_outputs }
+		(contributed_inputs, contributed_outputs)
 	}

 	pub fn tx(&self) -> &Transaction {
@@ -852,6 +857,10 @@ impl InteractiveTxSigningSession {
 	pub(crate) fn into_negotiation_error(self, reason: AbortReason) -> NegotiationError {
 		self.unsigned_tx.into_negotiation_error(reason)
 	}
+
+	pub(super) fn into_contributed_inputs_and_outputs(self) -> (Vec<OutPoint>, Vec<TxOut>) {
+		self.unsigned_tx.into_contributed_inputs_and_outputs()
+	}
 }

 impl_writeable_tlv_based!(InteractiveTxSigningSession, {
@@ -1885,6 +1894,7 @@ impl InteractiveTxInput {

 pub(super) struct InteractiveTxConstructor {
 	state_machine: StateMachine,
+	is_initiator: bool,
 	initiator_first_message: Option<InteractiveTxMessageSend>,
 	channel_id: ChannelId,
 	inputs_to_contribute: Vec<(SerialId, InputOwned)>,
@@ -2047,6 +2057,7 @@ impl InteractiveTxConstructor {

 		let mut constructor = Self {
 			state_machine,
+			is_initiator,
 			initiator_first_message: None,
 			channel_id,
 			inputs_to_contribute,
@@ -2069,21 +2080,44 @@ impl InteractiveTxConstructor {
 	}

 	fn into_negotiation_error(self, reason: AbortReason) -> NegotiationError {
-		NegotiationError {
-			reason,
-			contributed_inputs: self
-				.inputs_to_contribute
-				.into_iter()
-				.filter(|(_, input)| !input.is_shared())
-				.map(|(_, input)| input.into_tx_in().previous_output)
-				.collect(),
-			contributed_outputs: self
-				.outputs_to_contribute
-				.into_iter()
-				.filter(|(_, output)| !output.is_shared())
-				.map(|(_, output)| output.into_tx_out())
-				.collect(),
-		}
+		let (contributed_inputs, contributed_outputs) = self.into_contributed_inputs_and_outputs();
+		NegotiationError { reason, contributed_inputs, contributed_outputs }
+	}
+
+	pub(super) fn into_contributed_inputs_and_outputs(self) -> (Vec<OutPoint>, Vec<TxOut>) {
+		let contributed_inputs = self
+			.inputs_to_contribute
+			.into_iter()
+			.filter(|(_, input)| !input.is_shared())
+			.map(|(_, input)| input.into_tx_in().previous_output)
+			.collect();
+		let contributed_outputs = self
+			.outputs_to_contribute
+			.into_iter()
+			.filter(|(_, output)| !output.is_shared())
+			.map(|(_, output)| output.into_tx_out())
+			.collect();
+		(contributed_inputs, contributed_outputs)
+	}
+
+	pub(super) fn to_contributed_inputs_and_outputs(&self) -> (Vec<OutPoint>, Vec<TxOut>) {
+		let contributed_inputs = self
+			.inputs_to_contribute
+			.iter()
+			.filter(|(_, input)| !input.is_shared())
+			.map(|(_, input)| input.tx_in().previous_output)
+			.collect();
+		let contributed_outputs = self
+			.outputs_to_contribute
+			.iter()
+			.filter(|(_, output)| !output.is_shared())
+			.map(|(_, output)| output.tx_out().clone())
+			.collect();
+		(contributed_inputs, contributed_outputs)
+	}
+
+	pub fn is_initiator(&self) -> bool {
+		self.is_initiator
 	}

 	pub fn take_initiator_first_message(&mut self) -> Option<InteractiveTxMessageSend> {
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index
b316381398e..30ee01fdbd7 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -1467,6 +1467,7 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc channel_id: Some(chan_id), counterparty_node_id: Some(nodes[0].node.get_our_node_id()), discard_funding: false, + splice_failed: false, reason: None, // Could be due to any HTLC timing out, so don't bother checking channel_funding_txo: None, user_channel_id: None, diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index b040f454e61..ede6acfb639 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -504,6 +504,7 @@ fn test_set_outpoints_partial_claiming() { channel_id: Some(chan.2), counterparty_node_id: Some(nodes[0].node.get_our_node_id()), discard_funding: false, + splice_failed: false, reason: None, // Could be due to either HTLC timing out, so don't bother checking channel_funding_txo: None, user_channel_id: None, diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 054842abd9b..437298afc22 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -636,6 +636,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { channel_id: None, counterparty_node_id: Some(node_a_id), discard_funding: false, + splice_failed: false, reason: Some(ClosureReason::LocallyInitiatedCooperativeClosure), channel_funding_txo: None, user_channel_id: None, @@ -645,6 +646,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { channel_id: None, counterparty_node_id: Some(node_c_id), discard_funding: false, + splice_failed: false, reason: Some(ClosureReason::CounterpartyInitiatedCooperativeClosure), channel_funding_txo: None, user_channel_id: None, diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index ac84eeacaf8..3edd051d735 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -21,6 +21,7 @@ use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSe use crate::ln::types::ChannelId; use crate::util::errors::APIError; use crate::util::ser::Writeable; +use crate::util::test_channel_signer::SignerOp; use bitcoin::{Amount, OutPoint as BitcoinOutPoint, ScriptBuf, Transaction, TxOut}; @@ -68,6 +69,21 @@ fn negotiate_splice_tx<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, initiator_contribution: SpliceContribution, ) -> msgs::CommitmentSigned { + let new_funding_script = + complete_splice_handshake(initiator, acceptor, channel_id, initiator_contribution.clone()); + complete_interactive_funding_negotiation( + initiator, + acceptor, + channel_id, + initiator_contribution, + new_funding_script, + ) +} + +fn complete_splice_handshake<'a, 'b, 'c, 'd>( + initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, + initiator_contribution: SpliceContribution, +) -> ScriptBuf { let node_id_initiator = initiator.node.get_our_node_id(); let node_id_acceptor = acceptor.node.get_our_node_id(); @@ -76,7 +92,7 @@ fn negotiate_splice_tx<'a, 'b, 'c, 'd>( .splice_channel( &channel_id, &node_id_acceptor, - initiator_contribution.clone(), + initiator_contribution, FEERATE_FLOOR_SATS_PER_KW, None, ) @@ -98,13 +114,7 @@ fn negotiate_splice_tx<'a, 'b, 'c, 'd>( ) .to_p2wsh(); - complete_interactive_funding_negotiation( - initiator, - acceptor, - channel_id, - initiator_contribution, - new_funding_script, - ) 
+ new_funding_script } fn complete_interactive_funding_negotiation<'a, 'b, 'c, 'd>( @@ -246,8 +256,19 @@ fn splice_channel<'a, 'b, 'c, 'd>( initiator: &'a Node<'b, 'c, 'd>, acceptor: &'a Node<'b, 'c, 'd>, channel_id: ChannelId, initiator_contribution: SpliceContribution, ) -> Transaction { - let initial_commit_sig_for_acceptor = - negotiate_splice_tx(initiator, acceptor, channel_id, initiator_contribution); + let node_id_initiator = initiator.node.get_our_node_id(); + let node_id_acceptor = acceptor.node.get_our_node_id(); + + let new_funding_script = + complete_splice_handshake(initiator, acceptor, channel_id, initiator_contribution.clone()); + + let initial_commit_sig_for_acceptor = complete_interactive_funding_negotiation( + initiator, + acceptor, + channel_id, + initiator_contribution, + new_funding_script, + ); sign_interactive_funding_transaction(initiator, acceptor, initial_commit_sig_for_acceptor); let splice_tx = { @@ -257,6 +278,10 @@ fn splice_channel<'a, 'b, 'c, 'd>( assert_eq!(initiator_txn, acceptor_txn); initiator_txn.remove(0) }; + + expect_splice_pending_event(initiator, &node_id_acceptor); + expect_splice_pending_event(acceptor, &node_id_initiator); + splice_tx } @@ -408,6 +433,8 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { nodes[1].node.peer_disconnected(node_id_0); } + let _event = get_event!(nodes[0], Event::SpliceFailed); + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_channel_ready = (true, true); reconnect_args.send_announcement_sigs = (true, true); @@ -465,6 +492,8 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { nodes[1].node.peer_disconnected(node_id_0); } + let _event = get_event!(nodes[0], Event::SpliceFailed); + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_channel_ready = (true, true); reconnect_args.send_announcement_sigs = (true, true); @@ -559,6 +588,9 @@ fn test_config_reject_inbound_splices() { nodes[0].node.peer_disconnected(node_id_1); nodes[1].node.peer_disconnected(node_id_0); + + let _event = get_event!(nodes[0], Event::SpliceFailed); + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_channel_ready = (true, true); reconnect_args.send_announcement_sigs = (true, true); @@ -999,11 +1031,13 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { nodes[0].node.funding_transaction_signed(&channel_id, &node_id_1, tx).unwrap(); } let _ = get_event_msg!(nodes[0], MessageSendEvent::SendTxSignatures, node_id_1); + expect_splice_pending_event(&nodes[0], &node_id_1); // Reconnect to make sure node 0 retransmits its `tx_signatures` as it was never delivered. reconnect_nodes!(|reconnect_args: &mut ReconnectArgs| { reconnect_args.send_interactive_tx_sigs = (false, true); }); + expect_splice_pending_event(&nodes[1], &node_id_0); // Reestablish the channel again to make sure node 0 doesn't retransmit `tx_signatures` // unnecessarily as it was delivered in the previous reestablishment. 
@@ -1114,3 +1148,314 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { .chain_source .remove_watched_txn_and_outputs(prev_funding_outpoint, prev_funding_script); } + +#[test] +fn disconnect_on_unexpected_interactive_tx_message() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let config = test_default_anchors_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let initiator = &nodes[0]; + let acceptor = &nodes[1]; + + let _node_id_initiator = initiator.node.get_our_node_id(); + let node_id_acceptor = acceptor.node.get_our_node_id(); + + let initial_channel_capacity = 100_000; + let (_, _, channel_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, initial_channel_capacity, 0); + + let coinbase_tx = provide_anchor_reserves(&nodes); + let splice_in_amount = initial_channel_capacity / 2; + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(splice_in_amount), + inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), + }; + + // Complete interactive-tx construction, but fail by having the acceptor send a duplicate + // tx_complete instead of commitment_signed. + let _ = negotiate_splice_tx(initiator, acceptor, channel_id, contribution.clone()); + + let mut msg_events = acceptor.node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + assert!(matches!(msg_events.remove(0), MessageSendEvent::UpdateHTLCs { .. })); + + let tx_complete = msgs::TxComplete { channel_id }; + initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); + + let _warning = get_warning_msg(initiator, &node_id_acceptor); +} + +#[test] +fn fail_splice_on_interactive_tx_error() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let config = test_default_anchors_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let initiator = &nodes[0]; + let acceptor = &nodes[1]; + + let node_id_initiator = initiator.node.get_our_node_id(); + let node_id_acceptor = acceptor.node.get_our_node_id(); + + let initial_channel_capacity = 100_000; + let (_, _, channel_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, initial_channel_capacity, 0); + + let coinbase_tx = provide_anchor_reserves(&nodes); + let splice_in_amount = initial_channel_capacity / 2; + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(splice_in_amount), + inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), + }; + + // Fail during interactive-tx construction by having the acceptor echo back tx_add_input instead + // of sending tx_complete. The failure occurs because the serial id will have the wrong parity. 
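// Aside: a minimal sketch of the serial-id parity rule the comment above relies on, assuming
// the usual interactive-tx convention that the initiator picks even serial IDs and the
// non-initiator odd ones. A tx_add_input echoed back verbatim therefore carries a serial ID
// whose parity marks it as the receiver's own, which aborts the negotiation. The helper below
// is illustrative only and is not part of this patch.
fn serial_id_is_valid_for_sender(serial_id: u64, sender_is_initiator: bool) -> bool {
	(serial_id % 2 == 0) == sender_is_initiator
}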
+ let _ = complete_splice_handshake(initiator, acceptor, channel_id, contribution.clone()); + + let tx_add_input = + get_event_msg!(initiator, MessageSendEvent::SendTxAddInput, node_id_acceptor); + acceptor.node.handle_tx_add_input(node_id_initiator, &tx_add_input); + + let _tx_complete = + get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); + initiator.node.handle_tx_add_input(node_id_acceptor, &tx_add_input); + + let event = get_event!(initiator, Event::SpliceFailed); + match event { + Event::SpliceFailed { contributed_inputs, .. } => { + assert_eq!(contributed_inputs.len(), 1); + assert_eq!(contributed_inputs[0], contribution.inputs()[0].outpoint()); + }, + _ => panic!("Expected Event::SpliceFailed"), + } + + let tx_abort = get_event_msg!(initiator, MessageSendEvent::SendTxAbort, node_id_acceptor); + acceptor.node.handle_tx_abort(node_id_initiator, &tx_abort); + + let tx_abort = get_event_msg!(acceptor, MessageSendEvent::SendTxAbort, node_id_initiator); + initiator.node.handle_tx_abort(node_id_acceptor, &tx_abort); + + // Fail signing the commitment transaction, which prevents the initiator from sending + // tx_complete. + initiator.disable_channel_signer_op( + &node_id_acceptor, + &channel_id, + SignerOp::SignCounterpartyCommitment, + ); + let _ = complete_splice_handshake(initiator, acceptor, channel_id, contribution.clone()); + + let tx_add_input = + get_event_msg!(initiator, MessageSendEvent::SendTxAddInput, node_id_acceptor); + acceptor.node.handle_tx_add_input(node_id_initiator, &tx_add_input); + + let tx_complete = get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); + initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); + + let tx_add_input = + get_event_msg!(initiator, MessageSendEvent::SendTxAddInput, node_id_acceptor); + acceptor.node.handle_tx_add_input(node_id_initiator, &tx_add_input); + + let tx_complete = get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); + initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); + + let tx_add_output = + get_event_msg!(initiator, MessageSendEvent::SendTxAddOutput, node_id_acceptor); + acceptor.node.handle_tx_add_output(node_id_initiator, &tx_add_output); + + let tx_complete = get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); + initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); + + let tx_add_output = + get_event_msg!(initiator, MessageSendEvent::SendTxAddOutput, node_id_acceptor); + acceptor.node.handle_tx_add_output(node_id_initiator, &tx_add_output); + + let tx_complete = get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); + initiator.node.handle_tx_complete(node_id_acceptor, &tx_complete); + + let event = get_event!(initiator, Event::SpliceFailed); + match event { + Event::SpliceFailed { contributed_inputs, .. 
} => { + assert_eq!(contributed_inputs.len(), 1); + assert_eq!(contributed_inputs[0], contribution.inputs()[0].outpoint()); + }, + _ => panic!("Expected Event::SpliceFailed"), + } + + let tx_abort = get_event_msg!(initiator, MessageSendEvent::SendTxAbort, node_id_acceptor); + acceptor.node.handle_tx_abort(node_id_initiator, &tx_abort); + + let tx_abort = get_event_msg!(acceptor, MessageSendEvent::SendTxAbort, node_id_initiator); + initiator.node.handle_tx_abort(node_id_acceptor, &tx_abort); +} + +#[test] +fn fail_splice_on_tx_abort() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let config = test_default_anchors_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let initiator = &nodes[0]; + let acceptor = &nodes[1]; + + let node_id_initiator = initiator.node.get_our_node_id(); + let node_id_acceptor = acceptor.node.get_our_node_id(); + + let initial_channel_capacity = 100_000; + let (_, _, channel_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, initial_channel_capacity, 0); + + let coinbase_tx = provide_anchor_reserves(&nodes); + let splice_in_amount = initial_channel_capacity / 2; + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(splice_in_amount), + inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), + }; + + // Fail during interactive-tx construction by having the acceptor send tx_abort instead of + // tx_complete. + let _ = complete_splice_handshake(initiator, acceptor, channel_id, contribution.clone()); + + let tx_add_input = + get_event_msg!(initiator, MessageSendEvent::SendTxAddInput, node_id_acceptor); + acceptor.node.handle_tx_add_input(node_id_initiator, &tx_add_input); + + let _tx_complete = + get_event_msg!(acceptor, MessageSendEvent::SendTxComplete, node_id_initiator); + + acceptor.node.abandon_splice(&channel_id, &node_id_initiator).unwrap(); + let tx_abort = get_event_msg!(acceptor, MessageSendEvent::SendTxAbort, node_id_initiator); + initiator.node.handle_tx_abort(node_id_acceptor, &tx_abort); + + let event = get_event!(initiator, Event::SpliceFailed); + match event { + Event::SpliceFailed { contributed_inputs, .. 
} => { + assert_eq!(contributed_inputs.len(), 1); + assert_eq!(contributed_inputs[0], contribution.inputs()[0].outpoint()); + }, + _ => panic!("Expected Event::SpliceFailed"), + } + + let tx_abort = get_event_msg!(initiator, MessageSendEvent::SendTxAbort, node_id_acceptor); + acceptor.node.handle_tx_abort(node_id_initiator, &tx_abort); +} + +#[test] +fn fail_splice_on_channel_close() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let config = test_default_anchors_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let initiator = &nodes[0]; + let acceptor = &nodes[1]; + + let _node_id_initiator = initiator.node.get_our_node_id(); + let node_id_acceptor = acceptor.node.get_our_node_id(); + + let initial_channel_capacity = 100_000; + let (_, _, channel_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, initial_channel_capacity, 0); + + let coinbase_tx = provide_anchor_reserves(&nodes); + let splice_in_amount = initial_channel_capacity / 2; + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(splice_in_amount), + inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), + }; + + // Close the channel before completion of interactive-tx construction. + let _ = complete_splice_handshake(initiator, acceptor, channel_id, contribution.clone()); + let _tx_add_input = + get_event_msg!(initiator, MessageSendEvent::SendTxAddInput, node_id_acceptor); + + initiator + .node + .force_close_broadcasting_latest_txn(&channel_id, &node_id_acceptor, "test".to_owned()) + .unwrap(); + handle_bump_events(initiator, true, 0); + check_closed_events( + &nodes[0], + &[ExpectedCloseEvent { + channel_id: Some(channel_id), + discard_funding: false, + splice_failed: true, + channel_funding_txo: None, + user_channel_id: Some(42), + ..Default::default() + }], + ); + check_closed_broadcast(&nodes[0], 1, true); + check_added_monitors(&nodes[0], 1); +} + +#[test] +fn fail_quiescent_action_on_channel_close() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let config = test_default_anchors_channel_config(); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let initiator = &nodes[0]; + let acceptor = &nodes[1]; + + let _node_id_initiator = initiator.node.get_our_node_id(); + let node_id_acceptor = acceptor.node.get_our_node_id(); + + let initial_channel_capacity = 100_000; + let (_, _, channel_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, initial_channel_capacity, 0); + + let coinbase_tx = provide_anchor_reserves(&nodes); + let splice_in_amount = initial_channel_capacity / 2; + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(splice_in_amount), + inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), + }; + + // Close the channel before completion of STFU handshake. 
+ initiator + .node + .splice_channel( + &channel_id, + &node_id_acceptor, + contribution, + FEERATE_FLOOR_SATS_PER_KW, + None, + ) + .unwrap(); + + let _stfu_init = get_event_msg!(initiator, MessageSendEvent::SendStfu, node_id_acceptor); + + initiator + .node + .force_close_broadcasting_latest_txn(&channel_id, &node_id_acceptor, "test".to_owned()) + .unwrap(); + handle_bump_events(initiator, true, 0); + check_closed_events( + &nodes[0], + &[ExpectedCloseEvent { + channel_id: Some(channel_id), + discard_funding: false, + splice_failed: true, + channel_funding_txo: None, + user_channel_id: Some(42), + ..Default::default() + }], + ); + check_closed_broadcast(&nodes[0], 1, true); + check_added_monitors(&nodes[0], 1); +}
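// Aside: a rough sketch (not part of the patch) of how an application event handler might
// react to the two new splice events exercised above. The `SpliceWallet` trait and its methods
// are hypothetical placeholders for whatever UTXO bookkeeping the application already does;
// only the event variants and the fields matched below come from this patch, and the
// `bitcoin::OutPoint`/`TxOut` types for the contributed inputs and outputs are assumed from
// the tests above.
use bitcoin::{OutPoint, TxOut};
use lightning::events::Event;
use lightning::ln::types::ChannelId;

trait SpliceWallet {
	// Keep the UTXOs backing the pending splice reserved until it confirms or is replaced by
	// an RBF attempt for the same channel.
	fn lock_splice_utxos(&mut self, channel_id: ChannelId);
	// Return the contributed inputs and outputs to the pool once the negotiation is abandoned.
	fn release_splice_utxos(
		&mut self, channel_id: ChannelId, inputs: Vec<OutPoint>, outputs: Vec<TxOut>,
	);
}

fn handle_splice_event<W: SpliceWallet>(wallet: &mut W, event: Event) {
	match event {
		Event::SplicePending { channel_id, .. } => {
			wallet.lock_splice_utxos(channel_id);
		},
		Event::SpliceFailed { channel_id, contributed_inputs, contributed_outputs, .. } => {
			wallet.release_splice_utxos(channel_id, contributed_inputs, contributed_outputs);
		},
		_ => {},
	}
}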