@@ -58,6 +58,7 @@ use crate::events::{
 use crate::events::{FundingInfo, PaidBolt12Invoice};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
+use crate::io;
 use crate::ln::channel::PendingV2Channel;
 use crate::ln::channel::{
 	self, Channel, ChannelError, ChannelUpdateStatus, FundedChannel, InboundV1Channel,
@@ -78,7 +79,7 @@ use crate::ln::onion_payment::{
 };
 use crate::ln::onion_utils::{self};
 use crate::ln::onion_utils::{HTLCFailReason, LocalHTLCFailureReason};
-use crate::ln::our_peer_storage::EncryptedOurPeerStorage;
+use crate::ln::our_peer_storage::{EncryptedOurPeerStorage, PeerStorageMonitorHolderList};
 #[cfg(test)]
 use crate::ln::outbound_payment;
 use crate::ln::outbound_payment::{
@@ -174,7 +175,6 @@ use lightning_invoice::{
 
 use alloc::collections::{btree_map, BTreeMap};
 
-use crate::io;
 use crate::io::Read;
 use crate::prelude::*;
 use crate::sync::{Arc, FairRwLock, LockHeldState, LockTestExt, Mutex, RwLock, RwLockReadGuard};
@@ -8807,6 +8807,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		&self, peer_node_id: PublicKey, msg: msgs::PeerStorageRetrieval,
 	) -> Result<(), MsgHandleErrInternal> {
 		// TODO: Check if have any stale or missing ChannelMonitor.
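+		// Take a read lock on the per-peer state up front so the retrieved backup can be compared
+		// against our current view of each channel below.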
+		let per_peer_state = self.per_peer_state.read().unwrap();
 		let logger = WithContext::from(&self.logger, Some(peer_node_id), None, None);
 		let err = || {
 			MsgHandleErrInternal::from_chan_no_close(
@@ -8833,6 +8834,55 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 
 		log_trace!(logger, "Got valid {}-byte peer backup from {}", decrypted.len(), peer_node_id);
 
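+		// The decrypted blob is expected to deserialize as a `PeerStorageMonitorHolderList`, one
+		// entry per channel we asked the peer to store for us. Each entry is checked against our
+		// live channel state to detect whether we have fallen behind (i.e. lost channel data).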
+		let mut cursor = io::Cursor::new(decrypted);
+		match <PeerStorageMonitorHolderList as Readable>::read(&mut cursor) {
+			Ok(mon_list) => {
+				for mon_holder in mon_list.monitors.iter() {
+					let peer_state_mutex =
+						match per_peer_state.get(&mon_holder.counterparty_node_id) {
+							Some(mutex) => mutex,
+							None => {
+								log_debug!(
+									logger,
+									"Unable to find peer_state for counterparty {}, channel_id {}",
+									log_pubkey!(mon_holder.counterparty_node_id),
+									mon_holder.channel_id
+								);
+								continue;
+							},
+						};
+
+					let peer_state_lock = peer_state_mutex.lock().unwrap();
+					let peer_state = &*peer_state_lock;
+
+					match peer_state.channel_by_id.get(&mon_holder.channel_id) {
+						Some(chan) => {
+							if let Some(funded_chan) = chan.as_funded() {
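+								// Commitment transaction numbers count down from 2^48 - 1, so a
+								// revoked-commitment number greater than the backup's
+								// `min_seen_secret` means our local state is older than the state
+								// the peer stored for us.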
+								if funded_chan
+									.get_revoked_counterparty_commitment_transaction_number()
+									> mon_holder.min_seen_secret
+								{
+									panic!(
+										"Lost channel state for channel {}.\n\
+										 Received peer storage with a more recent state than what our node had.\n\
+										 Use the FundRecoverer to initiate a force close and sweep the funds.",
+										&mon_holder.channel_id
+									);
+								}
+							}
+						},
+						None => {
+							// TODO: Figure out if this channel is so old that we have forgotten about it.
+							panic!("Lost a channel {}", &mon_holder.channel_id);
+						},
+					}
+				}
+			},
+
+			Err(e) => {
+				panic!("Failed to deserialize PeerStorageMonitorHolderList: {}", e);
+			},
+		}
 		Ok(())
 	}
 
@@ -8858,6 +8908,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			), ChannelId([0; 32])));
 		}
 
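+		// The 1KiB cap is compiled out under `cfg(test)`, which lets tests exercise peer-storage
+		// payloads larger than the limit.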
+		#[cfg(not(test))]
 		if msg.data.len() > MAX_PEER_STORAGE_SIZE {
 			log_debug!(logger, "Sending warning to peer and ignoring peer storage request from {} as its over 1KiB", log_pubkey!(counterparty_node_id));