@@ -12973,65 +12973,6 @@ where
 
 		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator);
 
-		for (_, monitor) in args.channel_monitors.iter() {
-			for (payment_hash, (payment_preimage, _)) in monitor.get_stored_preimages() {
-				if let Some(payment) = claimable_payments.remove(&payment_hash) {
-					log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash);
-					let mut claimable_amt_msat = 0;
-					let mut receiver_node_id = Some(our_network_pubkey);
-					let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret;
-					if phantom_shared_secret.is_some() {
-						let phantom_pubkey = args.node_signer.get_node_id(Recipient::PhantomNode)
-							.expect("Failed to get node_id for phantom node recipient");
-						receiver_node_id = Some(phantom_pubkey)
-					}
-					for claimable_htlc in &payment.htlcs {
-						claimable_amt_msat += claimable_htlc.value;
-
-						// Add a holding-cell claim of the payment to the Channel, which should be
-						// applied ~immediately on peer reconnection. Because it won't generate a
-						// new commitment transaction we can just provide the payment preimage to
-						// the corresponding ChannelMonitor and nothing else.
-						//
-						// We do so directly instead of via the normal ChannelMonitor update
-						// procedure as the ChainMonitor hasn't yet been initialized, implying
-						// we're not allowed to call it directly yet. Further, we do the update
-						// without incrementing the ChannelMonitor update ID as there isn't any
-						// reason to.
-						// If we were to generate a new ChannelMonitor update ID here and then
-						// crash before the user finishes block connect we'd end up force-closing
-						// this channel as well. On the flip side, there's no harm in restarting
-						// without the new monitor persisted - we'll end up right back here on
-						// restart.
-						let previous_channel_id = claimable_htlc.prev_hop.channel_id;
-						if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
-							let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
-							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-							let peer_state = &mut *peer_state_lock;
-							if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
-								let logger = WithChannelContext::from(&args.logger, &channel.context, Some(payment_hash));
-								channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger);
-							}
-						}
-						if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
-							previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger);
-						}
-					}
-					let payment_id = payment.inbound_payment_id(&inbound_payment_id_secret.unwrap());
-					pending_events_read.push_back((events::Event::PaymentClaimed {
-						receiver_node_id,
-						payment_hash,
-						purpose: payment.purpose,
-						amount_msat: claimable_amt_msat,
-						htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
-						sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
-						onion_fields: payment.onion_fields,
-						payment_id: Some(payment_id),
-					}, None));
-				}
-			}
-		}
-
 		for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
 			if let Some(peer_state) = per_peer_state.get(&node_id) {
 				for (channel_id, actions) in monitor_update_blocked_actions.iter() {
@@ -13132,6 +13073,72 @@ where
 			default_configuration: args.default_config,
 		};
 
+		for (_, monitor) in args.channel_monitors.iter() {
+			for (payment_hash, (payment_preimage, _)) in monitor.get_stored_preimages() {
+				let per_peer_state = channel_manager.per_peer_state.read().unwrap();
+				let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
+				let payment = claimable_payments.claimable_payments.remove(&payment_hash);
+				mem::drop(claimable_payments);
+				if let Some(payment) = payment {
+					log_info!(channel_manager.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash);
+					let mut claimable_amt_msat = 0;
+					let mut receiver_node_id = Some(our_network_pubkey);
+					let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret;
+					if phantom_shared_secret.is_some() {
+						let phantom_pubkey = channel_manager.node_signer.get_node_id(Recipient::PhantomNode)
+							.expect("Failed to get node_id for phantom node recipient");
+						receiver_node_id = Some(phantom_pubkey)
+					}
+					for claimable_htlc in &payment.htlcs {
+						claimable_amt_msat += claimable_htlc.value;
+
+						// Add a holding-cell claim of the payment to the Channel, which should be
+						// applied ~immediately on peer reconnection. Because it won't generate a
+						// new commitment transaction we can just provide the payment preimage to
+						// the corresponding ChannelMonitor and nothing else.
+						//
+						// We do so directly instead of via the normal ChannelMonitor update
+						// procedure as the ChainMonitor hasn't yet been initialized, implying
+						// we're not allowed to call it directly yet. Further, we do the update
+						// without incrementing the ChannelMonitor update ID as there isn't any
+						// reason to.
+						// If we were to generate a new ChannelMonitor update ID here and then
+						// crash before the user finishes block connect we'd end up force-closing
+						// this channel as well. On the flip side, there's no harm in restarting
+						// without the new monitor persisted - we'll end up right back here on
+						// restart.
+						let previous_channel_id = claimable_htlc.prev_hop.channel_id;
+						let peer_node_id_opt = channel_manager.outpoint_to_peer.lock().unwrap()
+							.get(&claimable_htlc.prev_hop.outpoint).cloned();
+						if let Some(peer_node_id) = peer_node_id_opt {
+							let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap();
+							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+							let peer_state = &mut *peer_state_lock;
+							if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
+								let logger = WithChannelContext::from(&channel_manager.logger, &channel.context, Some(payment_hash));
+								channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger);
+							}
+						}
+						if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
+							previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &channel_manager.tx_broadcaster, &channel_manager.fee_estimator, &channel_manager.logger);
+						}
+					}
+					let mut pending_events = channel_manager.pending_events.lock().unwrap();
+					let payment_id = payment.inbound_payment_id(&inbound_payment_id_secret.unwrap());
+					pending_events.push_back((events::Event::PaymentClaimed {
+						receiver_node_id,
+						payment_hash,
+						purpose: payment.purpose,
+						amount_msat: claimable_amt_msat,
+						htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
+						sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
+						onion_fields: payment.onion_fields,
+						payment_id: Some(payment_id),
+					}, None));
+				}
+			}
+		}
+
 		for htlc_source in failed_htlcs.drain(..) {
 			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
 			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
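
The locking discipline of the relocated block is the interesting part of this diff: the re-claim loop now runs after `channel_manager` has been constructed, so every `args.`-qualified access becomes a `channel_manager.`-qualified one, and the payment is removed under the `claimable_payments` mutex, whose guard is then released with `mem::drop` before the per-peer locks below are taken. The following is a minimal sketch of that remove-then-drop pattern using hypothetical stand-in types, not LDK's real `ClaimablePayments`/`PeerState` structures:

	use std::collections::HashMap;
	use std::sync::{Mutex, RwLock};

	// Hypothetical stand-ins for the ChannelManager fields touched above.
	struct Manager {
		// payment_hash -> claimable HTLC amounts (msat)
		claimable_payments: Mutex<HashMap<[u8; 32], Vec<u64>>>,
		// counterparty node id -> per-peer channel state
		per_peer_state: RwLock<HashMap<u64, Mutex<Vec<u64>>>>,
	}

	fn reclaim_payment(mgr: &Manager, payment_hash: [u8; 32], peer: u64) {
		// Remove the payment while holding only the claimable_payments lock.
		// The temporary guard dies at the end of this statement (the diff
		// spells this out with an explicit mem::drop), so it is never held
		// while the per_peer_state and per-peer locks below are acquired.
		let payment = mgr.claimable_payments.lock().unwrap().remove(&payment_hash);
		if let Some(htlcs) = payment {
			let per_peer_state = mgr.per_peer_state.read().unwrap();
			if let Some(peer_state_mutex) = per_peer_state.get(&peer) {
				let mut peer_state = peer_state_mutex.lock().unwrap();
				// ...replay the claim against each channel here...
				peer_state.extend(htlcs);
			}
		}
	}

	fn main() {
		let mgr = Manager {
			claimable_payments: Mutex::new(HashMap::from([([0u8; 32], vec![1_000])])),
			per_peer_state: RwLock::new(HashMap::from([(7, Mutex::new(Vec::new()))])),
		};
		reclaim_payment(&mgr, [0u8; 32], 7);
		assert_eq!(*mgr.per_peer_state.read().unwrap()[&7].lock().unwrap(), vec![1_000]);
	}

Dropping the `claimable_payments` guard before acquiring `per_peer_state` and the per-peer mutexes presumably keeps the lock order consistent with the rest of `ChannelManager`, avoiding holding both locks across the per-channel claim work.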