
Commit e00747e

Aditya Sharma authored and committed
Handle PeerStorage Message and its Persistence
This commit introduces the handling and persistence of PeerStorage messages on a per-peer basis. The peer storage is kept within the PeerState to simplify management, so it does not need to be removed when there are no active channels with the peer. Key changes include:

- Add PeerStorage to PeerState for persistent storage.
- Implement internal_peer_storage to manage PeerStorage and its updates.
- Add logic in peer_connected() to resend PeerStorage before the channel reestablish message upon reconnection.
- Update PeerState's write() and read() methods to support PeerStorage persistence.
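As a reading aid for the diff below, here is a minimal, self-contained sketch of that decision flow. It is not LDK code: PeerStore, PeerEntry, StoreError, and MAX_PEER_STORAGE_BYTES are hypothetical names used only to illustrate the funded-channel gate, the 1 KiB cap, and the overwrite-on-update behaviour that internal_peer_storage implements.

use std::collections::HashMap;

// Assumed cap, mirroring the 1 KiB size check in the diff below.
const MAX_PEER_STORAGE_BYTES: usize = 1024;

#[derive(Default)]
struct PeerEntry {
    has_funded_channel: bool,
    peer_storage: Vec<u8>, // latest blob the peer asked us to hold
}

#[derive(Debug)]
enum StoreError {
    NoFundedChannel, // we only serve peers we have a funded channel with
    TooLarge,        // blob exceeds the cap; the real handler queues a warning message
}

#[derive(Default)]
struct PeerStore {
    peers: HashMap<[u8; 33], PeerEntry>, // keyed by serialized peer public key
}

impl PeerStore {
    // Store `data` for `peer`, mirroring the checks in internal_peer_storage.
    fn handle_peer_storage(&mut self, peer: [u8; 33], data: Vec<u8>) -> Result<(), StoreError> {
        let entry = self.peers.entry(peer).or_default();
        if !entry.has_funded_channel {
            return Err(StoreError::NoFundedChannel);
        }
        if data.len() > MAX_PEER_STORAGE_BYTES {
            return Err(StoreError::TooLarge);
        }
        entry.peer_storage = data; // a later submission simply overwrites the previous blob
        Ok(())
    }

    // On reconnection, return the blob to echo back (if any), mirroring the
    // retrieval message queued in peer_connected.
    fn data_to_resend(&self, peer: &[u8; 33]) -> Option<&[u8]> {
        self.peers
            .get(peer)
            .map(|e| e.peer_storage.as_slice())
            .filter(|d| !d.is_empty())
    }
}

fn main() {
    let mut store = PeerStore::default();
    let peer = [2u8; 33];
    store.peers.insert(peer, PeerEntry { has_funded_channel: true, peer_storage: Vec::new() });
    assert!(store.handle_peer_storage(peer, vec![0u8; 2048]).is_err()); // rejected: over 1 KiB
    assert!(store.handle_peer_storage(peer, b"backup blob".to_vec()).is_ok());
    assert_eq!(store.data_to_resend(&peer), Some(&b"backup blob"[..]));
}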
1 parent 10463d0 commit e00747e

File tree: 1 file changed, +52 −2 lines


lightning/src/ln/channelmanager.rs

Lines changed: 52 additions & 2 deletions
@@ -1380,6 +1380,8 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
     /// [`ChannelMessageHandler::peer_connected`] and no corresponding
     /// [`ChannelMessageHandler::peer_disconnected`].
     pub is_connected: bool,
+    /// Holds the peer storage data for the channel partner on a per-peer basis.
+    peer_storage: Vec<u8>,
 }
 
 impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
@@ -8170,9 +8172,41 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
         }
     }
 
-    fn internal_your_peer_storage(&self, _counterparty_node_id: &PublicKey, _msg: &msgs::YourPeerStorageMessage) {}
+    fn internal_peer_storage(&self, counterparty_node_id: &PublicKey, msg: &msgs::PeerStorageMessage) {
+        let per_peer_state = self.per_peer_state.read().unwrap();
+        let peer_state_mutex = match per_peer_state.get(counterparty_node_id) {
+            Some(peer_state_mutex) => peer_state_mutex,
+            None => return,
+        };
+        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+        let peer_state = &mut *peer_state_lock;
+        let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None, None);
+
+        // Check if we have any channels with the peer (currently we only provide the service to peers we have a channel with).
+        if !peer_state.channel_by_id.values().any(|phase| phase.is_funded()) {
+            log_debug!(logger, "Ignoring peer storage request from {} as we don't have any funded channels with them.", log_pubkey!(counterparty_node_id));
+            return;
+        }
+
+        #[cfg(not(test))]
+        if msg.data.len() > 1024 {
+            log_debug!(logger, "Sending warning to peer and ignoring peer storage request from {} as it's over 1 KiB", log_pubkey!(counterparty_node_id));
+            peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+                node_id: counterparty_node_id.clone(),
+                action: msgs::ErrorAction::SendWarningMessage {
+                    msg: msgs::WarningMessage {
+                        channel_id: ChannelId([0; 32]),
+                        data: "Supports only data up to 1 KiB in peer storage.".to_owned()
+                    },
+                    log_level: Level::Trace,
+                }
+            });
+            return;
+        }
 
-    fn internal_peer_storage(&self, _counterparty_node_id: &PublicKey, _msg: &msgs::PeerStorageMessage) {}
+        log_trace!(logger, "Received Peer Storage from {}", log_pubkey!(counterparty_node_id));
+        peer_state.peer_storage = msg.data.clone();
+    }
 
     fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
         let best_block = *self.best_block.read().unwrap();
@@ -11664,6 +11698,7 @@ where
                     actions_blocking_raa_monitor_updates: BTreeMap::new(),
                     closed_channel_monitor_update_ids: BTreeMap::new(),
                     is_connected: true,
+                    peer_storage: Vec::new(),
                 }));
             },
             hash_map::Entry::Occupied(e) => {
@@ -11693,6 +11728,15 @@ where
             let peer_state = &mut *peer_state_lock;
             let pending_msg_events = &mut peer_state.pending_msg_events;
 
+            if !peer_state.peer_storage.is_empty() {
+                pending_msg_events.push(events::MessageSendEvent::SendPeerStorageRetrievalMessage {
+                    node_id: counterparty_node_id.clone(),
+                    msg: msgs::PeerStorageRetrievalMessage {
+                        data: peer_state.peer_storage.clone()
+                    },
+                });
+            }
+
             for (_, chan) in peer_state.channel_by_id.iter_mut() {
                 let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
                 match chan.peer_connected_get_handshake(self.chain_hash, &&logger) {
@@ -12859,6 +12903,9 @@ where
             if !peer_state.ok_to_remove(false) {
                 peer_pubkey.write(writer)?;
                 peer_state.latest_features.write(writer)?;
+
+                peer_state.peer_storage.write(writer)?;
+
                 if !peer_state.monitor_update_blocked_actions.is_empty() {
                     monitor_update_blocked_actions_per_peer
                         .get_or_insert_with(Vec::new)
@@ -13212,6 +13259,7 @@ where
             monitor_update_blocked_actions: BTreeMap::new(),
             actions_blocking_raa_monitor_updates: BTreeMap::new(),
             closed_channel_monitor_update_ids: BTreeMap::new(),
+            peer_storage: Vec::new(),
             is_connected: false,
         }
     };
@@ -13435,8 +13483,10 @@ where
         for _ in 0..peer_count {
             let peer_pubkey: PublicKey = Readable::read(reader)?;
             let latest_features = Readable::read(reader)?;
+            let peer_storage = Readable::read(reader)?;
             if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
                 peer_state.get_mut().unwrap().latest_features = latest_features;
+                peer_state.get_mut().unwrap().peer_storage = peer_storage;
             }
         }
 
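The final hunks persist the stored blob across restarts: write() emits each surviving peer's peer_storage alongside latest_features, and read() restores it into the rebuilt PeerState, so a reconnecting peer can be sent its data again. The round trip below is only an illustrative sketch of that idea using a hypothetical length-prefixed framing; it does not reproduce LDK's actual Writeable/Readable encoding.

use std::io::{self, Read, Write};

// Illustrative framing only: a u16 length prefix is plenty, since blobs are capped at 1 KiB.
fn write_peer_storage<W: Write>(w: &mut W, blob: &[u8]) -> io::Result<()> {
    w.write_all(&(blob.len() as u16).to_be_bytes())?;
    w.write_all(blob)
}

fn read_peer_storage<R: Read>(r: &mut R) -> io::Result<Vec<u8>> {
    let mut len_bytes = [0u8; 2];
    r.read_exact(&mut len_bytes)?;
    let mut blob = vec![0u8; u16::from_be_bytes(len_bytes) as usize];
    r.read_exact(&mut blob)?;
    Ok(blob)
}

fn main() -> io::Result<()> {
    let stored = b"peer backup".to_vec();

    // Serialize the per-peer blob (done while writing out the manager state)...
    let mut buf = Vec::new();
    write_peer_storage(&mut buf, &stored)?;

    // ...and restore it on startup (done while reading the manager state back in).
    let restored = read_peer_storage(&mut buf.as_slice())?;
    assert_eq!(stored, restored); // the blob survives the restart
    Ok(())
}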