diff --git a/lightning-liquidity/src/lib.rs b/lightning-liquidity/src/lib.rs
index 5fb59c319c8..646a6e43015 100644
--- a/lightning-liquidity/src/lib.rs
+++ b/lightning-liquidity/src/lib.rs
@@ -46,7 +46,7 @@
 #![allow(ellipsis_inclusive_range_patterns)]
 #![allow(clippy::drop_non_drop)]
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
-#![cfg_attr(not(feature = "std"), no_std)]
+#![cfg_attr(not(any(test, feature = "std")), no_std)]
 
 #[macro_use]
 extern crate alloc;
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 7f77f20e088..de1430ca0ac 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -8311,6 +8311,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 					hold_time,
 				);
 
+				#[cfg(test)]
+				let claiming_chan_funding_outpoint = hop_data.outpoint;
 				self.claim_funds_from_hop(
 					hop_data,
 					payment_preimage,
@@ -8329,6 +8331,50 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 							// monitor updates still in flight. In that case, we shouldn't
 							// immediately free, but instead let that monitor update complete
 							// in the background.
+							#[cfg(test)]
+							{
+								let per_peer_state = self.per_peer_state.deadlocking_read();
+								// The channel we'd unblock should already be closed, or...
+								let channel_closed = per_peer_state
+									.get(&next_channel_counterparty_node_id)
+									.map(|lck| lck.deadlocking_lock())
+									.map(|peer| !peer.channel_by_id.contains_key(&next_channel_id))
+									.unwrap_or(true);
+								let background_events =
+									self.pending_background_events.lock().unwrap();
+								// there should be a `BackgroundEvent` pending...
+								let matching_bg_event =
+									background_events.iter().any(|ev| {
+										match ev {
+											// to apply a monitor update that blocked the claiming channel,
+											BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+												funding_txo, update, ..
+											} => {
+												if *funding_txo == claiming_chan_funding_outpoint {
+													assert!(update.updates.iter().any(|upd|
+														if let ChannelMonitorUpdateStep::PaymentPreimage {
+															payment_preimage: update_preimage, ..
+														} = upd {
+															payment_preimage == *update_preimage
+														} else { false }
+													), "{:?}", update);
+													true
+												} else { false }
+											},
+											// or the monitor update has completed and will unblock
+											// immediately once we get going.
+											BackgroundEvent::MonitorUpdatesComplete {
+												channel_id, ..
+											} =>
+												*channel_id == prev_channel_id,
+										}
+									});
+								assert!(
+									channel_closed || matching_bg_event,
+									"{:?}",
+									*background_events
+								);
+							}
 							(None, None)
 						} else if definitely_duplicate {
 							if let Some(other_chan) = chan_to_release {
diff --git a/lightning/src/sync/debug_sync.rs b/lightning/src/sync/debug_sync.rs
index 991a71ffbe0..c8fb212a58b 100644
--- a/lightning/src/sync/debug_sync.rs
+++ b/lightning/src/sync/debug_sync.rs
@@ -1,6 +1,7 @@
 pub use alloc::sync::Arc;
 use core::fmt;
 use core::ops::{Deref, DerefMut};
+#[cfg(feature = "std")]
 use core::time::Duration;
 
 use std::cell::RefCell;
@@ -10,6 +11,7 @@ use std::sync::RwLock as StdRwLock;
 use std::sync::RwLockReadGuard as StdRwLockReadGuard;
 use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
 
+#[cfg(feature = "std")]
 use parking_lot::Condvar as StdCondvar;
 use parking_lot::Mutex as StdMutex;
 use parking_lot::MutexGuard as StdMutexGuard;
@@ -34,10 +36,12 @@ impl Backtrace {
 pub type LockResult<Guard> = Result<Guard, ()>;
 
+#[cfg(feature = "std")]
 pub struct Condvar {
 	inner: StdCondvar,
 }
 
+#[cfg(feature = "std")]
 impl Condvar {
 	pub fn new() -> Condvar {
 		Condvar { inner: StdCondvar::new() }
 	}
@@ -287,6 +291,7 @@ pub struct MutexGuard<'a, T: Sized + 'a> {
 }
 
 impl<'a, T: Sized> MutexGuard<'a, T> {
+	#[cfg(feature = "std")]
 	fn into_inner(self) -> StdMutexGuard<'a, T> {
 		// Somewhat unclear why we cannot move out of self.lock, but doing so gets E0509.
 		unsafe {
@@ -332,6 +337,19 @@ impl<T> Mutex<T> {
 		}
 	}
 
+	#[cfg(test)]
+	/// Takes a lock without any deadlock detection logic. This should never exist in production
+	/// code (the deadlock detection logic is important!) but can be used in tests where we're
+	/// willing to risk deadlocks (accepting that simply no existing tests hit them).
+	pub fn deadlocking_lock<'a>(&'a self) -> MutexGuard<'a, T> {
+		let lock = self.inner.lock();
+		if self.poisoned.load(Ordering::Acquire) {
+			panic!();
+		} else {
+			MutexGuard { mutex: self, lock: Some(lock) }
+		}
+	}
+
 	pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
 		LockMetadata::pre_lock(&self.deps, false);
 		let lock = self.inner.lock();
@@ -443,6 +461,14 @@ impl<T> RwLock<T> {
 		self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard }).map_err(|_| ())
 	}
 
+	#[cfg(test)]
+	/// Takes a read lock without any deadlock detection logic. This should never exist in
+	/// production code (the deadlock detection logic is important!) but can be used in tests where
+	/// we're willing to risk deadlocks (accepting that simply no existing tests hit them).
+	pub fn deadlocking_read<'a>(&'a self) -> RwLockReadGuard<'a, T> {
+		self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard }).unwrap()
+	}
+
 	pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
 		LockMetadata::pre_lock(&self.deps, false);
 		self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
diff --git a/lightning/src/sync/mod.rs b/lightning/src/sync/mod.rs
index 3ed076b7dfd..9a889845203 100644
--- a/lightning/src/sync/mod.rs
+++ b/lightning/src/sync/mod.rs
@@ -20,11 +20,11 @@ pub(crate) trait LockTestExt<'a> {
 	fn unsafe_well_ordered_double_lock_self(&'a self) -> Self::ExclLock;
 }
 
-#[cfg(all(feature = "std", not(ldk_bench), test))]
+#[cfg(all(not(ldk_bench), test))]
 mod debug_sync;
-#[cfg(all(feature = "std", not(ldk_bench), test))]
+#[cfg(all(not(ldk_bench), test))]
 pub use debug_sync::*;
-#[cfg(all(feature = "std", not(ldk_bench), test))]
+#[cfg(all(not(ldk_bench), test))]
 // Note that to make debug_sync's regex work this must not contain `debug_string` in the module name
 mod test_lockorder_checks;
 
@@ -63,7 +63,7 @@ mod ext_impl {
 	}
}
 
-#[cfg(not(feature = "std"))]
+#[cfg(all(not(feature = "std"), any(ldk_bench, not(test))))]
 mod nostd_sync;
-#[cfg(not(feature = "std"))]
+#[cfg(all(not(feature = "std"), any(ldk_bench, not(test))))]
 pub use nostd_sync::*;
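Usage note (illustrative, not part of the patch): the new `deadlocking_lock`/`deadlocking_read` helpers acquire the underlying lock but skip the deadlock-detection bookkeeping that the regular paths run (for example `lock()`'s `LockMetadata::pre_lock` call in the hunk above), which is why the `#[cfg(test)]`-only assertion block added to channelmanager.rs can use them. A minimal sketch of the pattern against the crate-internal `sync::{Mutex, RwLock}` wrappers; the function name and data shapes below are made up for illustration:

#[cfg(test)]
fn assert_all_queues_empty(peers: &RwLock<Vec<Mutex<Vec<u64>>>>) {
	// Skips the lock-order tracking done by the normal locking paths, so this is
	// acceptable only in tests, per the doc comments added in debug_sync.rs.
	let peers = peers.deadlocking_read();
	for peer in peers.iter() {
		// Same idea for Mutex; per the patch it still panics if the lock was poisoned.
		let queue = peer.deadlocking_lock();
		assert!(queue.is_empty());
	}
}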