diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 6a2007165d7..2a1b9e9a70a 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -155,7 +155,7 @@ impl chain::Watch for TestChainMonitor {
 		};
 		let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::
 			read(&mut Cursor::new(&map_entry.get().1), (&*self.keys, &*self.keys)).unwrap().1;
-		deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
+		deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
 		let mut ser = VecWriter(Vec::new());
 		deserialized_monitor.write(&mut ser).unwrap();
 		map_entry.insert((update.update_id, ser.0));
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index efa1a42142c..643cae590aa 100644
--- a/lightning-background-processor/src/lib.rs
+++ b/lightning-background-processor/src/lib.rs
@@ -1181,7 +1181,7 @@ mod tests {
 			let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
 			let scorer = Arc::new(Mutex::new(TestScorer::new()));
 			let seed = [i as u8; 32];
-			let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), ()));
+			let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), Default::default()));
 			let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
 			let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
 			let now = Duration::from_secs(genesis_block.header.time as u64);
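Note (not part of the patch): the fuzz call site above now passes `&&FuzzEstimator { .. }` because `update_monitor` takes `fee_estimator: &F` where `F: Deref` and `F::Target: FeeEstimator`, so `F` is typically instantiated as a plain reference to the concrete estimator. A minimal, self-contained sketch of that calling convention, using a stand-in trait rather than LDK's `FeeEstimator`:

```rust
use std::ops::Deref;

// Stand-in for LDK's `FeeEstimator`; only the reference plumbing is illustrated here.
trait FeeEstimatorLike {}

struct MyEstimator;
impl FeeEstimatorLike for MyEstimator {}

// Mirrors the new `update_monitor(..., fee_estimator: &F, ...)` shape.
fn takes_fee_estimator<F: Deref>(fee_estimator: &F) where F::Target: FeeEstimatorLike {
	// `*fee_estimator` is the inner `&MyEstimator`, `**fee_estimator` is the estimator
	// itself, matching the `&**fee_estimator` deref in the channelmonitor hunk below.
	let _inner: &F::Target = &**fee_estimator;
}

fn main() {
	let est = MyEstimator;
	takes_fee_estimator(&&est); // `F` is inferred as `&MyEstimator`, hence the double `&`
}
```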
diff --git a/lightning-invoice/src/payment.rs b/lightning-invoice/src/payment.rs
index 08be3d54ba5..0247913634a 100644
--- a/lightning-invoice/src/payment.rs
+++ b/lightning-invoice/src/payment.rs
@@ -9,7 +9,8 @@

 //! Convenient utilities for paying Lightning invoices.

-use crate::{Bolt11Invoice, Vec};
+use crate::Bolt11Invoice;
+use crate::prelude::*;

 use bitcoin_hashes::Hash;

diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 261c0471ca4..e87d082d9a7 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -324,7 +324,6 @@ where C::Target: chain::Filter,
 			if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state).is_err() {
 				// Take the monitors lock for writing so that we poison it and any future
 				// operations going forward fail immediately.
-				core::mem::drop(monitor_state);
 				core::mem::drop(monitor_lock);
 				let _poison = self.monitors.write().unwrap();
 				log_error!(self.logger, "{}", err_str);
@@ -767,7 +766,7 @@ where C::Target: chain::Filter,
 			Some(monitor_state) => {
 				let monitor = &monitor_state.monitor;
 				log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
-				let update_res = monitor.update_monitor(update, &self.broadcaster, &*self.fee_estimator, &self.logger);
+				let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger);

 				let update_id = MonitorUpdateId::from_monitor_update(update);
 				let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index a0ac1657e93..bd0c1548428 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -1311,7 +1311,7 @@ impl ChannelMonitor {
 		&self,
 		updates: &ChannelMonitorUpdate,
 		broadcaster: &B,
-		fee_estimator: F,
+		fee_estimator: &F,
 		logger: &L,
 	) -> Result<(), ()>
 	where
@@ -2615,7 +2615,7 @@ impl ChannelMonitorImpl {
 		self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
 	}

-	pub fn update_monitor(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: F, logger: &L) -> Result<(), ()>
+	pub fn update_monitor(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), ()>
 	where B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
 		L::Target: Logger,
@@ -2655,7 +2655,7 @@ impl ChannelMonitorImpl {
 			panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
 		}
 		let mut ret = Ok(());
-		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&*fee_estimator);
+		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&**fee_estimator);
 		for update in updates.updates.iter() {
 			match update {
 				ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs, claimed_htlcs, nondust_htlc_sources } => {
@@ -4581,7 +4581,7 @@ mod tests {
 		let broadcaster = TestBroadcaster::with_blocks(Arc::clone(&nodes[1].blocks));
 		assert!(
-			pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &chanmon_cfgs[1].fee_estimator, &nodes[1].logger)
+			pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &&chanmon_cfgs[1].fee_estimator, &nodes[1].logger)
 			.is_err());
 		// Even though we error'd on the first update, we should still have generated an HTLC claim
 		// transaction
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 3f6a2a6b20d..8c9b8337e18 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -840,6 +840,9 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> =
 >;

 /// A trivial trait which describes any [`ChannelManager`].
+///
+/// This is not exported to bindings users as general cover traits aren't useful in other
+/// languages.
 pub trait AChannelManager {
 	/// A type implementing [`chain::Watch`].
 	type Watch: chain::Watch + ?Sized;
diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs
index 1642f28efc7..b806b138a59 100644
--- a/lightning/src/ln/outbound_payment.rs
+++ b/lightning/src/ln/outbound_payment.rs
@@ -594,10 +594,26 @@ impl RecipientOnionFields {
 	/// Note that if this field is non-empty, it will contain strictly increasing TLVs, each
 	/// represented by a `(u64, Vec<u8>)` for its type number and serialized value respectively.
 	/// This is validated when setting this field using [`Self::with_custom_tlvs`].
+	#[cfg(not(c_bindings))]
 	pub fn custom_tlvs(&self) -> &Vec<(u64, Vec<u8>)> {
 		&self.custom_tlvs
 	}

+	/// Gets the custom TLVs that will be sent or have been received.
+	///
+	/// Custom TLVs allow sending extra application-specific data with a payment. They provide
+	/// additional flexibility on top of payment metadata, as while other implementations may
+	/// require `payment_metadata` to reflect metadata provided in an invoice, custom TLVs
+	/// do not have this restriction.
+	///
+	/// Note that if this field is non-empty, it will contain strictly increasing TLVs, each
+	/// represented by a `(u64, Vec<u8>)` for its type number and serialized value respectively.
+	/// This is validated when setting this field using [`Self::with_custom_tlvs`].
+	#[cfg(c_bindings)]
+	pub fn custom_tlvs(&self) -> Vec<(u64, Vec<u8>)> {
+		self.custom_tlvs.clone()
+	}
+
 	/// When we have received some HTLC(s) towards an MPP payment, as we receive further HTLC(s) we
 	/// have to make sure that some fields match exactly across the parts. For those that aren't
 	/// required to match, if they don't match we should remove them so as to not expose data
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 72176760243..26ecbb0bd24 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -3781,7 +3781,7 @@ fn test_retry_custom_tlvs() {
 		payment_hash, Some(payment_secret), events.pop().unwrap(), true, None).unwrap();
 	match payment_claimable {
 		Event::PaymentClaimable { onion_fields, .. } => {
-			assert_eq!(onion_fields.unwrap().custom_tlvs(), &custom_tlvs);
+			assert_eq!(&onion_fields.unwrap().custom_tlvs()[..], &custom_tlvs[..]);
 		},
 		_ => panic!("Unexpected event"),
 	};
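Note (not part of the patch): a hedged usage sketch for the `custom_tlvs()` accessors touched above. The module paths and the illustrative TLV type numbers are assumptions, not taken from this diff:

```rust
use lightning::ln::PaymentSecret;
use lightning::ln::channelmanager::RecipientOnionFields;

fn main() {
	// Custom TLV records ride along with the payment onion; the type numbers here are
	// arbitrary values in the custom (>= 2^16) range and must be strictly increasing.
	let custom_tlvs = vec![((1 << 16) + 13, vec![0x01, 0x02]), ((1 << 16) + 15, vec![0xff])];
	let onion_fields = RecipientOnionFields::secret_only(PaymentSecret([42; 32]))
		.with_custom_tlvs(custom_tlvs.clone())
		.expect("valid, strictly increasing TLV type numbers");
	// Under `c_bindings` the accessor returns an owned Vec rather than a reference, which
	// is why the test above compares slices instead of the containers directly.
	assert_eq!(&onion_fields.custom_tlvs()[..], &custom_tlvs[..]);
}
```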
diff --git a/lightning/src/offers/offer.rs b/lightning/src/offers/offer.rs
index e0bc63e8b2b..60621b9dc74 100644
--- a/lightning/src/offers/offer.rs
+++ b/lightning/src/offers/offer.rs
@@ -366,7 +366,7 @@ macro_rules! offer_accessors { ($self: ident, $contents: expr) => {
 	/// The chains that may be used when paying a requested invoice (e.g., bitcoin mainnet).
 	/// Payments must be denominated in units of the minimal lightning-payable unit (e.g., msats)
 	/// for the selected chain.
-	pub fn chains(&$self) -> Vec<$crate::bitcoin::blockdata::constants::ChainHash> {
+	pub fn chains(&$self) -> Vec<bitcoin::blockdata::constants::ChainHash> {
 		$contents.chains()
 	}

@@ -418,7 +418,7 @@ macro_rules! offer_accessors { ($self: ident, $contents: expr) => {
 	}

 	/// The public key used by the recipient to sign invoices.
-	pub fn signing_pubkey(&$self) -> $crate::bitcoin::secp256k1::PublicKey {
+	pub fn signing_pubkey(&$self) -> bitcoin::secp256k1::PublicKey {
 		$contents.signing_pubkey()
 	}
} }
diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs
index 2a9c830d38b..03dc6a5b002 100644
--- a/lightning/src/onion_message/messenger.rs
+++ b/lightning/src/onion_message/messenger.rs
@@ -246,6 +246,64 @@ pub trait CustomOnionMessageHandler {
 	fn read_custom_message(&self, message_type: u64, buffer: &mut R) -> Result, msgs::DecodeError>;
 }

+
+/// Create an onion message with contents `message` to the destination of `path`.
+/// Returns (introduction_node_id, onion_msg)
+pub fn create_onion_message(
+	entropy_source: &ES, node_signer: &NS, secp_ctx: &Secp256k1,
+	path: OnionMessagePath, message: OnionMessageContents, reply_path: Option,
+) -> Result<(PublicKey, msgs::OnionMessage), SendError>
+where
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+{
+	let OnionMessagePath { intermediate_nodes, mut destination } = path;
+	if let Destination::BlindedPath(BlindedPath { ref blinded_hops, .. }) = destination {
+		if blinded_hops.len() < 2 {
+			return Err(SendError::TooFewBlindedHops);
+		}
+	}
+
+	if message.tlv_type() < 64 { return Err(SendError::InvalidMessage) }
+
+	// If we are sending straight to a blinded path and we are the introduction node, we need to
+	// advance the blinded path by 1 hop so the second hop is the new introduction node.
+	if intermediate_nodes.len() == 0 {
+		if let Destination::BlindedPath(ref mut blinded_path) = destination {
+			let our_node_id = node_signer.get_node_id(Recipient::Node)
+				.map_err(|()| SendError::GetNodeIdFailed)?;
+			if blinded_path.introduction_node_id == our_node_id {
+				advance_path_by_one(blinded_path, node_signer, &secp_ctx)
+					.map_err(|()| SendError::BlindedPathAdvanceFailed)?;
+			}
+		}
+	}
+
+	let blinding_secret_bytes = entropy_source.get_secure_random_bytes();
+	let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
+	let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
+		(intermediate_nodes[0], PublicKey::from_secret_key(&secp_ctx, &blinding_secret))
+	} else {
+		match destination {
+			Destination::Node(pk) => (pk, PublicKey::from_secret_key(&secp_ctx, &blinding_secret)),
+			Destination::BlindedPath(BlindedPath { introduction_node_id, blinding_point, .. }) =>
+				(introduction_node_id, blinding_point),
+		}
+	};
+	let (packet_payloads, packet_keys) = packet_payloads_and_keys(
+		&secp_ctx, &intermediate_nodes, destination, message, reply_path, &blinding_secret)
+		.map_err(|e| SendError::Secp256k1(e))?;
+
+	let prng_seed = entropy_source.get_secure_random_bytes();
+	let onion_routing_packet = construct_onion_message_packet(
+		packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
+
+	Ok((introduction_node_id, msgs::OnionMessage {
+		blinding_point,
+		onion_routing_packet
+	}))
+}
+
 impl OnionMessenger
 where
@@ -283,13 +341,9 @@
 		&self, path: OnionMessagePath, message: OnionMessageContents, reply_path: Option
 	) -> Result<(), SendError> {
-		let (introduction_node_id, onion_msg) = Self::create_onion_message(
-			&self.entropy_source,
-			&self.node_signer,
-			&self.secp_ctx,
-			path,
-			message,
-			reply_path
+		let (introduction_node_id, onion_msg) = create_onion_message(
+			&self.entropy_source, &self.node_signer, &self.secp_ctx,
+			path, message, reply_path
 		)?;

 		let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
@@ -303,63 +357,6 @@
 		}
 	}

-	/// Create an onion message with contents `message` to the destination of `path`.
-	/// Returns (introduction_node_id, onion_msg)
-	pub fn create_onion_message(
-		entropy_source: &ES,
-		node_signer: &NS,
-		secp_ctx: &Secp256k1,
-		path: OnionMessagePath,
-		message: OnionMessageContents,
-		reply_path: Option,
-	) -> Result<(PublicKey, msgs::OnionMessage), SendError> {
-		let OnionMessagePath { intermediate_nodes, mut destination } = path;
-		if let Destination::BlindedPath(BlindedPath { ref blinded_hops, .. }) = destination {
-			if blinded_hops.len() < 2 {
-				return Err(SendError::TooFewBlindedHops);
-			}
-		}
-
-		if message.tlv_type() < 64 { return Err(SendError::InvalidMessage) }
-
-		// If we are sending straight to a blinded path and we are the introduction node, we need to
-		// advance the blinded path by 1 hop so the second hop is the new introduction node.
-		if intermediate_nodes.len() == 0 {
-			if let Destination::BlindedPath(ref mut blinded_path) = destination {
-				let our_node_id = node_signer.get_node_id(Recipient::Node)
-					.map_err(|()| SendError::GetNodeIdFailed)?;
-				if blinded_path.introduction_node_id == our_node_id {
-					advance_path_by_one(blinded_path, node_signer, &secp_ctx)
-						.map_err(|()| SendError::BlindedPathAdvanceFailed)?;
-				}
-			}
-		}
-
-		let blinding_secret_bytes = entropy_source.get_secure_random_bytes();
-		let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
-		let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
-			(intermediate_nodes[0], PublicKey::from_secret_key(&secp_ctx, &blinding_secret))
-		} else {
-			match destination {
-				Destination::Node(pk) => (pk, PublicKey::from_secret_key(&secp_ctx, &blinding_secret)),
-				Destination::BlindedPath(BlindedPath { introduction_node_id, blinding_point, .. }) =>
-					(introduction_node_id, blinding_point),
-			}
-		};
-		let (packet_payloads, packet_keys) = packet_payloads_and_keys(
-			&secp_ctx, &intermediate_nodes, destination, message, reply_path, &blinding_secret)
-			.map_err(|e| SendError::Secp256k1(e))?;
-
-		let prng_seed = entropy_source.get_secure_random_bytes();
-		let onion_routing_packet = construct_onion_message_packet(
-			packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
-
-		Ok((introduction_node_id, msgs::OnionMessage {
-			blinding_point,
-			onion_routing_packet
-		}))
-	}
-
 	fn respond_with_onion_message(
 		&self, response: OnionMessageContents, path_id: Option<[u8; 32]>, reply_path: Option
diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs
index 1297322fbd4..28d452c56b6 100644
--- a/lightning/src/routing/router.rs
+++ b/lightning/src/routing/router.rs
@@ -112,12 +112,12 @@ pub trait Router {
 /// [`find_route`].
 ///
 /// [`ScoreLookUp`]: crate::routing::scoring::ScoreLookUp
-pub struct ScorerAccountingForInFlightHtlcs<'a, SP: Sized, Sc: 'a + ScoreLookUp, S: Deref> {
+pub struct ScorerAccountingForInFlightHtlcs<'a, S: Deref> where S::Target: ScoreLookUp {
 	scorer: S,
 	// Maps a channel's short channel id and its direction to the liquidity used up.
 	inflight_htlcs: &'a InFlightHtlcs,
 }

-impl<'a, SP: Sized, Sc: ScoreLookUp, S: Deref> ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> {
+impl<'a, S: Deref> ScorerAccountingForInFlightHtlcs<'a, S> where S::Target: ScoreLookUp {
 	/// Initialize a new `ScorerAccountingForInFlightHtlcs`.
 	pub fn new(scorer: S, inflight_htlcs: &'a InFlightHtlcs) -> Self {
 		ScorerAccountingForInFlightHtlcs {
@@ -127,13 +127,8 @@ impl<'a, SP: Sized, Sc: ScoreLookUp, S: Deref> Sc
 	}
 }

-#[cfg(c_bindings)]
-impl<'a, SP: Sized, Sc: ScoreLookUp, S: Deref> Writeable for ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> {
-	fn write(&self, writer: &mut W) -> Result<(), io::Error> { self.scorer.write(writer) }
-}
-
-impl<'a, SP: Sized, Sc: 'a + ScoreLookUp, S: Deref> ScoreLookUp for ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> {
-	type ScoreParams = Sc::ScoreParams;
+impl<'a, S: Deref> ScoreLookUp for ScorerAccountingForInFlightHtlcs<'a, S> where S::Target: ScoreLookUp {
+	type ScoreParams = <S::Target as ScoreLookUp>::ScoreParams;
 	fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams) -> u64 {
 		if let Some(used_liquidity) = self.inflight_htlcs.used_liquidity_msat(
 			source, target, short_channel_id
diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs
index c790f5df5c3..207e1d69bd9 100644
--- a/lightning/src/routing/scoring.rs
+++ b/lightning/src/routing/scoring.rs
@@ -89,7 +89,7 @@ macro_rules! define_score { ($($supertrait: path)*) => {
 /// `ScoreLookUp` is used to determine the penalty for a given channel.
 ///
 /// Scoring is in terms of fees willing to be paid in order to avoid routing through a channel.
-pub trait ScoreLookUp $(: $supertrait)* {
+pub trait ScoreLookUp {
 	/// A configurable type which should contain various passed-in parameters for configuring the scorer,
 	/// on a per-routefinding-call basis through to the scorer methods,
 	/// which are used to determine the parameters for the suitability of channels for use.
@@ -108,7 +108,7 @@ pub trait ScoreLookUp $(: $supertrait)* {
 }

 /// `ScoreUpdate` is used to update the scorer's internal state after a payment attempt.
-pub trait ScoreUpdate $(: $supertrait)* {
+pub trait ScoreUpdate {
 	/// Handles updating channel penalties after failing to route through a channel.
 	fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64);

@@ -122,8 +122,20 @@ pub trait ScoreUpdate $(: $supertrait)* {
 	fn probe_successful(&mut self, path: &Path);
 }

-impl, T: Deref $(+ $supertrait)*> ScoreLookUp for T {
-	type ScoreParams = SP;
+/// A trait which can both lookup and update routing channel penalty scores.
+///
+/// This is used in places where both bounds are required and implemented for all types which
+/// implement [`ScoreLookUp`] and [`ScoreUpdate`].
+///
+/// Bindings users may need to manually implement this for their custom scoring implementations.
+pub trait Score : ScoreLookUp + ScoreUpdate $(+ $supertrait)* {}
+
+#[cfg(not(c_bindings))]
+impl Score for T {}
+
+#[cfg(not(c_bindings))]
+impl> ScoreLookUp for T {
+	type ScoreParams = S::ScoreParams;
 	fn channel_penalty_msat(
 		&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
 	) -> u64 {
@@ -131,7 +143,8 @@ impl, T: Deref $(+ $supert
 	}
 }

-impl $(+ $supertrait)*> ScoreUpdate for T {
+#[cfg(not(c_bindings))]
+impl> ScoreUpdate for T {
 	fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
 		self.deref_mut().payment_path_failed(path, short_channel_id)
 	}
@@ -192,7 +205,7 @@ pub trait WriteableScore<'a>: LockableScore<'a> + Writeable {}
 #[cfg(not(c_bindings))]
 impl<'a, T> WriteableScore<'a> for T where T: LockableScore<'a> + Writeable {}
 #[cfg(not(c_bindings))]
-impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for Mutex {
+impl<'a, T: Score + 'a> LockableScore<'a> for Mutex {
 	type ScoreUpdate = T;
 	type ScoreLookUp = T;

@@ -209,7 +222,7 @@ impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for Mutex {
 }

 #[cfg(not(c_bindings))]
-impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RefCell {
+impl<'a, T: Score + 'a> LockableScore<'a> for RefCell {
 	type ScoreUpdate = T;
 	type ScoreLookUp = T;

@@ -226,7 +239,7 @@ impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RefCell {
 }

 #[cfg(not(c_bindings))]
-impl<'a, SP:Sized, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RwLock {
+impl<'a, T: Score + 'a> LockableScore<'a> for RwLock {
 	type ScoreUpdate = T;
 	type ScoreLookUp = T;

@@ -244,12 +257,12 @@ impl<'a, SP:Sized, T: 'a + ScoreUpdate + ScoreLookUp> Lockabl
 #[cfg(c_bindings)]
 /// A concrete implementation of [`LockableScore`] which supports multi-threading.
-pub struct MultiThreadedLockableScore {
 	score: RwLock,
 }

 #[cfg(c_bindings)]
-impl<'a, SP:Sized, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for MultiThreadedLockableScore {
+impl<'a, T: Score + 'a> LockableScore<'a> for MultiThreadedLockableScore {
 	type ScoreUpdate = T;
 	type ScoreLookUp = T;
 	type WriteLocked = MultiThreadedScoreLockWrite<'a, Self::ScoreUpdate>;
@@ -265,17 +278,17 @@ impl<'a, SP:Sized, T: 'a + ScoreLookUp + ScoreUpdate> Lockable
 }

 #[cfg(c_bindings)]
-impl Writeable for MultiThreadedLockableScore {
+impl Writeable for MultiThreadedLockableScore {
 	fn write(&self, writer: &mut W) -> Result<(), io::Error> {
 		self.score.read().unwrap().write(writer)
 	}
 }

 #[cfg(c_bindings)]
-impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> WriteableScore<'a> for MultiThreadedLockableScore {}
+impl<'a, T: Score + 'a> WriteableScore<'a> for MultiThreadedLockableScore {}

 #[cfg(c_bindings)]
-impl MultiThreadedLockableScore {
+impl MultiThreadedLockableScore {
 	/// Creates a new [`MultiThreadedLockableScore`] given an underlying [`Score`].
 	pub fn new(score: T) -> Self {
 		MultiThreadedLockableScore { score: RwLock::new(score) }
@@ -284,14 +297,14 @@ impl MultiThreadedLockableScore {

 #[cfg(c_bindings)]
 /// A locked `MultiThreadedLockableScore`.
-pub struct MultiThreadedScoreLockRead<'a, T: ScoreLookUp>(RwLockReadGuard<'a, T>);
+pub struct MultiThreadedScoreLockRead<'a, T: Score>(RwLockReadGuard<'a, T>);

 #[cfg(c_bindings)]
 /// A locked `MultiThreadedLockableScore`.
-pub struct MultiThreadedScoreLockWrite<'a, T: ScoreUpdate>(RwLockWriteGuard<'a, T>);
+pub struct MultiThreadedScoreLockWrite<'a, T: Score>(RwLockWriteGuard<'a, T>);

 #[cfg(c_bindings)]
-impl<'a, T: 'a + ScoreLookUp> Deref for MultiThreadedScoreLockRead<'a, T> {
+impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockRead<'a, T> {
 	type Target = T;

 	fn deref(&self) -> &Self::Target {
@@ -300,14 +313,24 @@ impl<'a, T: 'a + ScoreLookUp> Deref for MultiThreadedScoreLockRead<'a, T> {
 }

 #[cfg(c_bindings)]
-impl<'a, T: 'a + ScoreUpdate> Writeable for MultiThreadedScoreLockWrite<'a, T> {
+impl<'a, T: Score> ScoreLookUp for MultiThreadedScoreLockRead<'a, T> {
+	type ScoreParams = T::ScoreParams;
+	fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId,
+		target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
+	) -> u64 {
+		self.0.channel_penalty_msat(short_channel_id, source, target, usage, score_params)
+	}
+}
+
+#[cfg(c_bindings)]
+impl<'a, T: Score> Writeable for MultiThreadedScoreLockWrite<'a, T> {
 	fn write(&self, writer: &mut W) -> Result<(), io::Error> {
 		self.0.write(writer)
 	}
 }

 #[cfg(c_bindings)]
-impl<'a, T: 'a + ScoreUpdate> Deref for MultiThreadedScoreLockWrite<'a, T> {
+impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockWrite<'a, T> {
 	type Target = T;

 	fn deref(&self) -> &Self::Target {
@@ -316,12 +339,31 @@ impl<'a, T: 'a + ScoreUpdate> Deref for MultiThreadedScoreLockWrite<'a, T> {
 }

 #[cfg(c_bindings)]
-impl<'a, T: 'a + ScoreUpdate> DerefMut for MultiThreadedScoreLockWrite<'a, T> {
+impl<'a, T: 'a + Score> DerefMut for MultiThreadedScoreLockWrite<'a, T> {
 	fn deref_mut(&mut self) -> &mut Self::Target {
 		self.0.deref_mut()
 	}
 }

+#[cfg(c_bindings)]
+impl<'a, T: Score> ScoreUpdate for MultiThreadedScoreLockWrite<'a, T> {
+	fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
+		self.0.payment_path_failed(path, short_channel_id)
+	}
+
+	fn payment_path_successful(&mut self, path: &Path) {
+		self.0.payment_path_successful(path)
+	}
+
+	fn probe_failed(&mut self, path: &Path, short_channel_id: u64) {
+		self.0.probe_failed(path, short_channel_id)
+	}
+
+	fn probe_successful(&mut self, path: &Path) {
+		self.0.probe_successful(path)
+	}
+}
+
 /// Proposed use of a channel passed as a parameter to [`ScoreLookUp::channel_penalty_msat`].
 #[derive(Clone, Copy, Debug, PartialEq)]
@@ -1417,6 +1459,10 @@ impl>, L: Deref, T: Time> ScoreUpdate for Prob
 	}
 }

+#[cfg(c_bindings)]
+impl>, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime
+where L::Target: Logger {}
+
 mod approx {
 	const BITS: u32 = 64;
 	const HIGHEST_BIT: u32 = BITS - 1;
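Note (not part of the patch): the new `Score` cover trait above is blanket-implemented for anything implementing both `ScoreLookUp` and `ScoreUpdate` on non-`c_bindings` builds, while bindings users may need to implement it by hand. A minimal sketch of a custom scorer against these traits, assuming the import paths of the `lightning` crate at this revision:

```rust
use lightning::routing::gossip::NodeId;
use lightning::routing::router::Path;
use lightning::routing::scoring::{ChannelUsage, ScoreLookUp, ScoreUpdate};

/// A toy scorer that charges a flat penalty and ignores payment/probe feedback.
struct FlatPenaltyScorer {
	penalty_msat: u64,
}

impl ScoreLookUp for FlatPenaltyScorer {
	// No per-routefinding-call parameters for this scorer.
	type ScoreParams = ();
	fn channel_penalty_msat(
		&self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId,
		_usage: ChannelUsage, _score_params: &Self::ScoreParams
	) -> u64 {
		self.penalty_msat
	}
}

impl ScoreUpdate for FlatPenaltyScorer {
	fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
	fn payment_path_successful(&mut self, _path: &Path) {}
	fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
	fn probe_successful(&mut self, _path: &Path) {}
}

// On non-`c_bindings` builds `FlatPenaltyScorer` now also satisfies `Score` via the blanket
// impl added above; a `c_bindings` build would add an explicit `impl Score for FlatPenaltyScorer {}`.
```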
diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs
index 39f1001c480..0e81d481efb 100644
--- a/lightning/src/sign/mod.rs
+++ b/lightning/src/sign/mod.rs
@@ -275,6 +275,9 @@ impl SpendableOutputDescriptor {
 	///
 	/// Note that this does not include any signatures, just the information required to
 	/// construct the transaction and sign it.
+	///
+	/// This is not exported to bindings users as there is no standard serialization for an input.
+	/// See [`Self::create_spendable_outputs_psbt`] instead.
 	pub fn to_psbt_input(&self) -> bitcoin::psbt::Input {
 		match self {
 			SpendableOutputDescriptor::StaticOutput { output, .. } => {
diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index 2a022c37cc4..35e473c42a6 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
@@ -397,11 +397,7 @@ where
 	pub fn new(
 		kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
 		signer_provider: SP,
-	) -> Self
-	where
-		ES::Target: EntropySource + Sized,
-		SP::Target: SignerProvider + Sized,
-	{
+	) -> Self {
 		MonitorUpdatingPersister {
 			kv_store,
 			logger,
@@ -416,12 +412,10 @@ where
 	/// It is extremely important that your [`KVStore::read`] implementation uses the
 	/// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
 	/// documentation for [`MonitorUpdatingPersister`].
-	pub fn read_all_channel_monitors_with_updates(
-		&self, broadcaster: B, fee_estimator: F,
+	pub fn read_all_channel_monitors_with_updates(
+		&self, broadcaster: &B, fee_estimator: &F,
 	) -> Result::Signer>)>, io::Error>
 	where
-		ES::Target: EntropySource + Sized,
-		SP::Target: SignerProvider + Sized,
 		B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
 	{
@@ -432,8 +426,8 @@ where
 		let mut res = Vec::with_capacity(monitor_list.len());
 		for monitor_key in monitor_list {
 			res.push(self.read_channel_monitor_with_updates(
-				&broadcaster,
-				fee_estimator.clone(),
+				broadcaster,
+				fee_estimator,
 				monitor_key,
 			)?)
 		}
@@ -457,12 +451,10 @@ where
 	///
 	/// Loading a large number of monitors will be faster if done in parallel. You can use this
 	/// function to accomplish this. Take care to limit the number of parallel readers.
-	pub fn read_channel_monitor_with_updates(
-		&self, broadcaster: &B, fee_estimator: F, monitor_key: String,
+	pub fn read_channel_monitor_with_updates(
+		&self, broadcaster: &B, fee_estimator: &F, monitor_key: String,
 	) -> Result<(BlockHash, ChannelMonitor<::Signer>), io::Error>
 	where
-		ES::Target: EntropySource + Sized,
-		SP::Target: SignerProvider + Sized,
 		B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
 	{
@@ -484,7 +476,7 @@ where
 			Err(err) => return Err(err),
 		};

-		monitor.update_monitor(&update, broadcaster, fee_estimator.clone(), &self.logger)
+		monitor.update_monitor(&update, broadcaster, fee_estimator, &self.logger)
 			.map_err(|e| {
 				log_error!(
 					self.logger,
@@ -949,17 +941,17 @@ mod tests {
 		// Check that the persisted channel data is empty before any channels are
 		// open.
 		let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
-			broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+			&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
 		assert_eq!(persisted_chan_data_0.len(), 0);
 		let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
-			broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
+			&broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
 		assert_eq!(persisted_chan_data_1.len(), 0);

 		// Helper to make sure the channel is on the expected update ID.
 		macro_rules! check_persisted_data {
 			($expected_update_id: expr) => {
 				persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
-					broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+					&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
 				// check that we stored only one monitor
 				assert_eq!(persisted_chan_data_0.len(), 1);
 				for (_, mon) in persisted_chan_data_0.iter() {
@@ -978,7 +970,7 @@
 				}
 			}
 			persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
-				broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
+				&broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
 			assert_eq!(persisted_chan_data_1.len(), 1);
 			for (_, mon) in persisted_chan_data_1.iter() {
 				assert_eq!(mon.get_latest_update_id(), $expected_update_id);
@@ -1043,7 +1035,7 @@
 		check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);

 		// Make sure the expected number of stale updates is present.
-		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
 		let (_, monitor) = &persisted_chan_data[0];
 		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
 		// The channel should have 0 updates, as it wrote a full monitor and consolidated.
@@ -1151,7 +1143,7 @@
 		// Check that the persisted channel data is empty before any channels are
 		// open.
-		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
 		assert_eq!(persisted_chan_data.len(), 0);

 		// Create some initial channel
@@ -1162,7 +1154,7 @@
 		send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);

 		// Get the monitor and make a fake stale update at update_id=1 (lowest height of an update possible)
-		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
 		let (_, monitor) = &persisted_chan_data[0];
 		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
 		persister_0
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 4bcaca57f81..bd66e4cae3e 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -140,10 +140,10 @@ impl<'a> Router for TestRouter<'a> {
 					// Since the path is reversed, the last element in our iteration is the first
 					// hop.
 					if idx == path.hops.len() - 1 {
-						scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage, &());
+						scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage, &Default::default());
 					} else {
 						let curr_hop_path_idx = path.hops.len() - 1 - idx;
-						scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path.hops[curr_hop_path_idx - 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage, &());
+						scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path.hops[curr_hop_path_idx - 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage, &Default::default());
 					}
 				}
 			}
@@ -153,7 +153,7 @@
 		let logger = TestLogger::new();
 		find_route(
 			payer, params, &self.network_graph, first_hops, &logger,
-			&ScorerAccountingForInFlightHtlcs::new(self.scorer.read().unwrap(), &inflight_htlcs), &(),
+			&ScorerAccountingForInFlightHtlcs::new(self.scorer.read().unwrap(), &inflight_htlcs), &Default::default(),
 			&[42; 32]
 		)
 	}
@@ -423,7 +423,7 @@ impl chainmonitor::Persist fo
 		chain::ChannelMonitorUpdateStatus::Completed
 	}

-	fn update_persisted_channel(&self, funding_txo: OutPoint, update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+	fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
 		let mut ret = chain::ChannelMonitorUpdateStatus::Completed;
 		if let Some(update_ret) = self.update_rets.lock().unwrap().pop_front() {
 			ret = update_ret;
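Note (not part of the patch): the `&()` to `&Default::default()` changes in the test router above follow from `ScoreParams` now being an associated type of `ScoreLookUp`, so callers construct a default parameters value for whichever scorer is in use. A small sketch, assuming the default probabilistic scorer whose parameter type is `ProbabilisticScoringFeeParameters`:

```rust
use lightning::routing::scoring::ProbabilisticScoringFeeParameters;

fn main() {
	// What `&Default::default()` resolves to when the scorer's ScoreParams is the
	// probabilistic scorer's fee parameters (an assumption made for illustration).
	let score_params = ProbabilisticScoringFeeParameters::default();
	let _params_ref = &score_params;
}
```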