Reconstruct ChannelManager forwarded HTLCs maps from Channels #4227
@@ -11509,6 +11509,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/

        if !new_intercept_events.is_empty() {
            let mut events = self.pending_events.lock().unwrap();
            new_intercept_events.retain(|new_ev| !events.contains(new_ev));
            events.append(&mut new_intercept_events);
        }
    }

@@ -17153,7 +17154,11 @@ where

        const MAX_ALLOC_SIZE: usize = 1024 * 64;
        let forward_htlcs_count: u64 = Readable::read(reader)?;
        let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
        // This map is read but may no longer be used because we'll attempt to rebuild `forward_htlcs`
        // from the `Channel{Monitor}`s instead, as a step towards getting rid of `ChannelManager`
        // persistence.
        let mut forward_htlcs_legacy: HashMap<u64, Vec<HTLCForwardInfo>> =
            hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
        for _ in 0..forward_htlcs_count {
            let short_channel_id = Readable::read(reader)?;
            let pending_forwards_count: u64 = Readable::read(reader)?;

@@ -17164,7 +17169,7 @@ where
            for _ in 0..pending_forwards_count {
                pending_forwards.push(Readable::read(reader)?);
            }
            forward_htlcs.insert(short_channel_id, pending_forwards);
            forward_htlcs_legacy.insert(short_channel_id, pending_forwards);
        }

        let claimable_htlcs_count: u64 = Readable::read(reader)?;

@@ -17252,12 +17257,18 @@ where
            };
        }

        // Some maps are read but may no longer be used because we attempt to rebuild pending HTLC
        // forwards from the `Channel{Monitor}`s instead, as a step towards getting rid of
        // `ChannelManager` persistence.
        let mut pending_intercepted_htlcs_legacy: Option<HashMap<InterceptId, PendingAddHTLCInfo>> =
            Some(new_hash_map());
        let mut decode_update_add_htlcs_legacy: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> =
            None;

        // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
        let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> =
            None;
        let mut pending_outbound_payments = None;
        let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> =
            Some(new_hash_map());
        let mut received_network_pubkey: Option<PublicKey> = None;
        let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
        let mut probing_cookie_secret: Option<[u8; 32]> = None;

@@ -17275,14 +17286,12 @@ where
        let mut in_flight_monitor_updates: Option<
            HashMap<(PublicKey, ChannelId), Vec<ChannelMonitorUpdate>>,
        > = None;
        let mut decode_update_add_htlcs_legacy: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> =
            None;
        let mut inbound_payment_id_secret = None;
        let mut peer_storage_dir: Option<Vec<(PublicKey, Vec<u8>)>> = None;
        let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new();
        read_tlv_fields!(reader, {
            (1, pending_outbound_payments_no_retry, option),
            (2, pending_intercepted_htlcs, option),
            (2, pending_intercepted_htlcs_legacy, option),
            (3, pending_outbound_payments, option),
            (4, pending_claiming_payments, option),
            (5, received_network_pubkey, option),

@@ -17704,7 +17713,7 @@ where
                        "HTLC was forwarded to the closed channel",
                        &args.logger,
                    );
                    forward_htlcs.retain(|_, forwards| {
                    forward_htlcs_legacy.retain(|_, forwards| {
                        forwards.retain(|forward| {
                            if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
                                if pending_forward_matches_htlc(&htlc_info) {

@@ -17716,7 +17725,7 @@ where
                        });
                        !forwards.is_empty()
                    });
                    pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
                    pending_intercepted_htlcs_legacy.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
                        if pending_forward_matches_htlc(&htlc_info) {
                            log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
                                &htlc.payment_hash, &monitor.channel_id());

@@ -18224,6 +18233,22 @@ where
            )
            .with_async_payments_offers_cache(async_receive_offer_cache);

        // If we are reading from a `ChannelManager` that was last serialized on LDK 0.2 or earlier, we
        // won't have been able to rebuild `decode_update_add_htlcs` from `Channel`s and should use
        // the legacy serialized maps instead.
        // TODO: if we read an upgraded channel but there just happened to be no committed update_adds
        // present, we'll use the old maps here. Maybe that's fine but we might want to add a flag in
        // the `Channel` that indicates it is upgraded and will serialize committed update_adds.

Contributor: Seems safer to be explicit.

Contributor (author): I think being explicit may not work either, because IIUC we need to handle the case where some of the deserialized `Channel`s are upgraded and some aren't.

Contributor: I see what you mean. But maybe it can be explicit on the channel level?

Contributor (author): I believe it's possible to have a mix of upgraded and downgraded HTLCs within the same channel.
        let (forward_htlcs, decode_update_add_htlcs, pending_intercepted_htlcs) =
            if decode_update_add_htlcs.is_empty() {
                (
                    forward_htlcs_legacy,
                    decode_update_add_htlcs_legacy,
                    pending_intercepted_htlcs_legacy.unwrap(),
                )
            } else {
                (new_hash_map(), decode_update_add_htlcs, new_hash_map())
            };
        let channel_manager = ChannelManager {
            chain_hash,
            fee_estimator: bounded_fee_estimator,

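Editor's note on the fallback above: the following is a minimal, self-contained Rust sketch with toy types, not LDK code; `pick_decode_update_add_htlcs`, `Scid`, and the string payloads are invented for illustration. It restates the emptiness-based rule the diff applies and the false negative the TODO describes, where an upgraded channel that simply had no committed update_adds at serialization time looks the same as a legacy one, so the old maps are used.

// Editor's sketch, not part of the diff: the fallback rule in miniature, with toy types.
use std::collections::HashMap;

type Scid = u64;

fn pick_decode_update_add_htlcs(
    rebuilt: HashMap<Scid, Vec<&'static str>>,
    legacy: HashMap<Scid, Vec<&'static str>>,
) -> HashMap<Scid, Vec<&'static str>> {
    // Mirrors the diff's `if decode_update_add_htlcs.is_empty() { ..legacy.. } else { ..rebuilt.. }`.
    if rebuilt.is_empty() { legacy } else { rebuilt }
}

fn main() {
    // An upgraded node that had no committed update_adds when it serialized produces an
    // empty rebuilt map, so the maps persisted with the old `ChannelManager` encoding are
    // picked -- the ambiguity the TODO suggests a per-`Channel` flag could resolve.
    let rebuilt: HashMap<Scid, Vec<&'static str>> = HashMap::new();
    let mut legacy = HashMap::new();
    legacy.insert(42, vec!["update_add read from the legacy map"]);
    assert_eq!(pick_decode_update_add_htlcs(rebuilt, legacy).len(), 1);
}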
@@ -18236,10 +18261,10 @@ where

            inbound_payment_key: expanded_inbound_key,
            pending_outbound_payments: pending_outbounds,
            pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
            pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs),

            forward_htlcs: Mutex::new(forward_htlcs),
            decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs_legacy),
            decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
            claimable_payments: Mutex::new(ClaimablePayments {
                claimable_payments,
                pending_claiming_payments: pending_claiming_payments.unwrap(),

@@ -20,6 +20,7 @@ use crate::chain::transaction::OutPoint;
use crate::events::{ClosureReason, Event, HTLCHandlingFailureType};
use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields, RAACommitmentOrder};
use crate::ln::msgs;
use crate::ln::outbound_payment::Retry;
use crate::ln::types::ChannelId;
use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, ErrorAction, MessageSendEvent};
use crate::util::test_channel_signer::TestChannelSigner;

@@ -508,7 +509,6 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {

#[cfg(feature = "std")]
fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, not_stale: bool) {
    use crate::ln::channelmanager::Retry;
    use crate::types::string::UntrustedString;
    // When we get a data_loss_protect proving we're behind, we immediately panic as the
    // chain::Watch API requirements have been violated (e.g. the user restored from a backup). The

@@ -1173,6 +1173,93 @@ fn removed_payment_no_manager_persistence() {
    expect_payment_failed!(nodes[0], payment_hash, false);
}

#[test]
fn manager_persisted_pre_htlc_forward_on_outbound_edge() {
    do_manager_persisted_pre_htlc_forward_on_outbound_edge(false);
}

#[test]
fn manager_persisted_pre_intercept_forward_on_outbound_edge() {
    do_manager_persisted_pre_htlc_forward_on_outbound_edge(true);
}

fn do_manager_persisted_pre_htlc_forward_on_outbound_edge(intercept_htlc: bool) {
    let chanmon_cfgs = create_chanmon_cfgs(3);
    let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
    let persister;
    let new_chain_monitor;
    let mut intercept_forwards_config = test_default_channel_config();
    intercept_forwards_config.accept_intercept_htlcs = true;
    let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]);
    let nodes_1_deserialized;
    let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

    let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
    let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;

    let intercept_scid = nodes[1].node.get_intercept_scid();

    // Lock in the HTLC from node_a <> node_b.
    let amt_msat = 5000;
    let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
    if intercept_htlc {
        route.paths[0].hops[1].short_channel_id = intercept_scid;
    }
    nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
    check_added_monitors(&nodes[0], 1);
    let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
    nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
    do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false);

    // Decode the HTLC onion but don't forward it to the next hop, such that the HTLC ends up in
    // `ChannelManager::forward_htlcs` or `ChannelManager::pending_intercepted_htlcs`.
    nodes[1].node.process_pending_update_add_htlcs();

    // Disconnect peers and reload the forwarding node_b.
    nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
    nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id());

    let node_b_encoded = nodes[1].node.encode();

    let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
    let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
    reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);

    reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0]));
    let mut args_b_c = ReconnectArgs::new(&nodes[1], &nodes[2]);
    args_b_c.send_channel_ready = (true, true);
    args_b_c.send_announcement_sigs = (true, true);
    reconnect_nodes(args_b_c);

    // Forward the HTLC and ensure we can claim it post-reload.
Contributor: What would happen again without the changes in this PR? I assume the HTLC wouldn't be forwarded here, but would it be failed back instead?

Contributor (author): You mean if we did the 3-line fix discussed offline? I don't think so, because the manager will only fail HTLCs that it knows about. I think we just wouldn't handle the HTLC and the channel would FC.

Contributor: Yes indeed, if we just discard the forward HTLCs. I thought there was still some inbound HTLC timer somewhere, but I guess not then?

Contributor: I wonder how fixing the FC by failing back the HTLC and then completely forgetting about forwards on restart would compare to the current approach.
    nodes[1].node.process_pending_htlc_forwards();

    if intercept_htlc {
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        let (intercept_id, expected_outbound_amt_msat) = match events[0] {
            Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } => {
                (intercept_id, expected_outbound_amount_msat)
            },
            _ => panic!()
        };
        nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id_2,
            nodes[2].node.get_our_node_id(), expected_outbound_amt_msat).unwrap();
        nodes[1].node.process_pending_htlc_forwards();
    }
    check_added_monitors(&nodes[1], 1);

    let updates = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id());
    nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
    do_commitment_signed_dance(&nodes[2], &nodes[1], &updates.commitment_signed, false, false);
    expect_and_process_pending_htlcs(&nodes[2], false);

    expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id());
    let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]];
    do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage));
    expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
}

#[test]
fn test_reload_partial_funding_batch() {
    let chanmon_cfgs = create_chanmon_cfgs(3);

Contributor: Or maybe all the legacy renames together can go in one preparatory commit.