@@ -1517,10 +1517,10 @@ where
15171517 return false;
15181518 }
15191519 }
1520- !self
1521- .channel_by_id
1522- .iter()
1523-            .any(|(_, channel)| channel.is_funded() || channel.funding().is_outbound())
1520+ let chan_is_funded_or_outbound = |(_, channel): (_, &Channel<SP>)| {
1521+ channel.is_funded() || channel.funding().is_outbound()
1522+ };
1523+		!self.channel_by_id.iter().any(chan_is_funded_or_outbound)
15241524 && self.monitor_update_blocked_actions.is_empty()
15251525 && self.closed_channel_monitor_update_ids.is_empty()
15261526 }
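
This first hunk binds the `.any()` predicate to a named closure so the call site fits on one line. A minimal standalone sketch of the same pattern follows; `Chan` and its two flags are hypothetical stand-ins for LDK's `Channel<SP>`, not the real API:

```rust
use std::collections::HashMap;

// Hypothetical stand-in for `Channel<SP>`; only the two flags the
// predicate reads are modeled.
struct Chan {
    funded: bool,
    outbound: bool,
}

// Mirrors the named-closure style from the hunk above.
fn any_funded_or_outbound(channels: &HashMap<u64, Chan>) -> bool {
    let chan_is_funded_or_outbound =
        |(_, chan): (_, &Chan)| chan.funded || chan.outbound;
    channels.iter().any(chan_is_funded_or_outbound)
}

fn main() {
    let mut channels = HashMap::new();
    channels.insert(1, Chan { funded: false, outbound: true });
    assert!(any_funded_or_outbound(&channels));
}
```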
@@ -3313,17 +3313,14 @@ macro_rules! emit_funding_tx_broadcast_safe_event {
33133313macro_rules! emit_channel_pending_event {
33143314 ($locked_events: expr, $channel: expr) => {
33153315 if $channel.context.should_emit_channel_pending_event() {
3316+ let funding_txo = $channel.funding.get_funding_txo().unwrap();
33163317 $locked_events.push_back((
33173318 events::Event::ChannelPending {
33183319 channel_id: $channel.context.channel_id(),
33193320 former_temporary_channel_id: $channel.context.temporary_channel_id(),
33203321 counterparty_node_id: $channel.context.get_counterparty_node_id(),
33213322 user_channel_id: $channel.context.get_user_id(),
3322- funding_txo: $channel
3323- .funding
3324- .get_funding_txo()
3325- .unwrap()
3326- .into_bitcoin_outpoint(),
3323+ funding_txo: funding_txo.into_bitcoin_outpoint(),
33273324 channel_type: Some($channel.funding.get_channel_type().clone()),
33283325 },
33293326 None,
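
The macro hunk hoists a five-line method chain out of a struct literal into a named local. A sketch of the same move, using hypothetical `Funding`/`OutPoint`/`ChannelPending` types rather than LDK's real ones:

```rust
#[derive(Clone, Copy)]
struct OutPoint {
    txid: u64,
    vout: u32,
}

struct Funding {
    txo: Option<OutPoint>,
}

impl Funding {
    fn get_funding_txo(&self) -> Option<OutPoint> {
        self.txo
    }
}

struct ChannelPending {
    funding_txo: OutPoint,
}

fn build_event(funding: &Funding) -> ChannelPending {
    // Hoisted local: the `unwrap()` is visible up front instead of
    // being buried inside a multi-line field initializer.
    let funding_txo = funding.get_funding_txo().unwrap();
    ChannelPending { funding_txo }
}

fn main() {
    let funding = Funding { txo: Some(OutPoint { txid: 1, vout: 0 }) };
    let ev = build_event(&funding);
    assert_eq!(ev.funding_txo.vout, 0);
}
```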
@@ -3798,8 +3795,8 @@ where
37983795 let mut outbound_scid_alias = 0;
37993796 let mut i = 0;
38003797 loop {
3798+			// fuzzing chacha20 doesn't use the key at all, so we always get the same alias
38013799 if cfg!(fuzzing) {
3802- // fuzzing chacha20 doesn't use the key at all so we always get the same alias
38033800 outbound_scid_alias += 1;
38043801 } else {
38053802 outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(
@@ -3931,22 +3928,17 @@ where
39313928 for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
39323929 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
39333930 let peer_state = &mut *peer_state_lock;
3934- res.extend(
3935- peer_state
3936- .channel_by_id
3937- .iter()
3938- // Only `Channels` in the `Channel::Funded` phase can be considered funded.
3939- .filter(|(_, chan)| chan.is_funded())
3940- .filter(f)
3941- .map(|(_channel_id, channel)| {
3942- ChannelDetails::from_channel(
3943- channel,
3944- best_block_height,
3945- peer_state.latest_features.clone(),
3946- &self.fee_estimator,
3947- )
3948- }),
3949- );
3931+ // Only `Channels` in the `Channel::Funded` phase can be considered funded.
3932+ let filtered_chan_by_id =
3933+ peer_state.channel_by_id.iter().filter(|(_, chan)| chan.is_funded()).filter(f);
3934+ res.extend(filtered_chan_by_id.map(|(_channel_id, channel)| {
3935+ ChannelDetails::from_channel(
3936+ channel,
3937+ best_block_height,
3938+ peer_state.latest_features.clone(),
3939+ &self.fee_estimator,
3940+ )
3941+ }));
39503942 }
39513943 }
39523944 res
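
Here the filtered iterator gets a name before `res.extend(...)`, flattening what was a deeply indented chain. A runnable sketch of the pattern, with plain numeric tuples standing in for LDK's channel and details types:

```rust
fn main() {
    let channels: Vec<(u32, i64)> = vec![(1, -5), (2, 7), (3, 9)];
    let mut res: Vec<String> = Vec::new();

    // Named intermediate: only "funded" (here: positive) entries survive.
    let filtered = channels.iter().filter(|(_, value)| *value > 0);
    res.extend(filtered.map(|(id, value)| format!("channel {id}: {value}")));

    assert_eq!(res.len(), 2);
}
```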
@@ -4013,12 +4005,8 @@ where
40134005 &self.fee_estimator,
40144006 )
40154007 };
4016- return peer_state
4017- .channel_by_id
4018- .iter()
4019- .map(|(_, chan)| (chan))
4020- .map(channel_to_details)
4021- .collect();
4008+ let chan_by_id = peer_state.channel_by_id.iter();
4009+ return chan_by_id.map(|(_, chan)| (chan)).map(channel_to_details).collect();
40224010 }
40234011 vec![]
40244012 }
@@ -8919,9 +8907,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
89198907 ) -> Result<(), MsgHandleErrInternal> {
89208908 self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
89218909 match channel.as_unfunded_v2_mut() {
8922- Some(unfunded_channel) => Ok(unfunded_channel
8923- .tx_add_output(msg)
8924- .into_msg_send_event(counterparty_node_id)),
8910+ Some(unfunded_channel) => {
8911+ let msg_send_event = unfunded_channel
8912+ .tx_add_output(msg)
8913+ .into_msg_send_event(counterparty_node_id);
8914+ Ok(msg_send_event)
8915+ },
89258916 None => Err("tx_add_output"),
89268917 }
89278918 })
@@ -8932,9 +8923,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
89328923 ) -> Result<(), MsgHandleErrInternal> {
89338924 self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
89348925 match channel.as_unfunded_v2_mut() {
8935- Some(unfunded_channel) => Ok(unfunded_channel
8936- .tx_remove_input(msg)
8937- .into_msg_send_event(counterparty_node_id)),
8926+ Some(unfunded_channel) => {
8927+ let msg_send_event = unfunded_channel
8928+ .tx_remove_input(msg)
8929+ .into_msg_send_event(counterparty_node_id);
8930+ Ok(msg_send_event)
8931+ },
89388932 None => Err("tx_remove_input"),
89398933 }
89408934 })
@@ -8945,9 +8939,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
89458939 ) -> Result<(), MsgHandleErrInternal> {
89468940 self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
89478941 match channel.as_unfunded_v2_mut() {
8948- Some(unfunded_channel) => Ok(unfunded_channel
8949- .tx_remove_output(msg)
8950- .into_msg_send_event(counterparty_node_id)),
8942+ Some(unfunded_channel) => {
8943+ let msg_send_event = unfunded_channel
8944+ .tx_remove_output(msg)
8945+ .into_msg_send_event(counterparty_node_id);
8946+ Ok(msg_send_event)
8947+ },
89518948 None => Err("tx_remove_output"),
89528949 }
89538950 })
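
The three hunks above make the same change to `tx_add_output`, `tx_remove_input`, and `tx_remove_output`: build the event in a named local, then wrap it in `Ok(...)`, rather than nesting a multi-line expression inside the `Ok`. A condensed sketch with hypothetical types in place of LDK's channel machinery:

```rust
enum Phase {
    UnfundedV2(String),
    Funded,
}

fn handle(phase: &mut Phase) -> Result<String, &'static str> {
    match phase {
        // Named intermediate first, then a short `Ok(...)`.
        Phase::UnfundedV2(name) => {
            let msg_send_event = format!("send event for {name}");
            Ok(msg_send_event)
        },
        Phase::Funded => Err("tx_add_output"),
    }
}

fn main() {
    let mut phase = Phase::UnfundedV2("chan".to_owned());
    assert!(handle(&mut phase).is_ok());
}
```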
@@ -9657,13 +9654,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
96579654 let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
96589655 let num_forward_events = pending_events
96599656 .iter()
9660- .filter(|(ev, _)| {
9661- if let events::Event::PendingHTLCsForwardable { .. } = ev {
9662- true
9663- } else {
9664- false
9665- }
9666- })
9657+ .filter(|(ev, _)| matches!(ev, events::Event::PendingHTLCsForwardable { .. }))
96679658 .count();
96689659 // We only want to push a PendingHTLCsForwardable event if no others are queued. Processing
96699660 // events is done in batches and they are not removed until we're done processing each
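
The `matches!` macro collapses the eight-line `if let ... { true } else { false }` into a single expression with identical behavior. A standalone sketch with a hypothetical event enum:

```rust
enum Event {
    PendingHTLCsForwardable { time_forwardable: u64 },
    PaymentSent,
}

fn main() {
    let pending_events = vec![
        (Event::PendingHTLCsForwardable { time_forwardable: 0 }, 1u8),
        (Event::PaymentSent, 2),
    ];

    let num_forward_events = pending_events
        .iter()
        .filter(|(ev, _)| matches!(ev, Event::PendingHTLCsForwardable { .. }))
        .count();

    assert_eq!(num_forward_events, 1);
}
```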
@@ -10954,30 +10945,32 @@ where
1095410945 payer_note: Option<String>, payment_id: PaymentId, retry_strategy: Retry,
1095510946 route_params_config: RouteParametersConfig,
1095610947 ) -> Result<(), Bolt12SemanticError> {
10948+ let create_pending_payment_fn = |invoice_request: &InvoiceRequest, nonce| {
10949+ let expiration = StaleExpiration::TimerTicks(1);
10950+ let retryable_invoice_request = RetryableInvoiceRequest {
10951+ invoice_request: invoice_request.clone(),
10952+ nonce,
10953+ needs_retry: true,
10954+ };
10955+ self.pending_outbound_payments
10956+ .add_new_awaiting_invoice(
10957+ payment_id,
10958+ expiration,
10959+ retry_strategy,
10960+ route_params_config,
10961+ Some(retryable_invoice_request),
10962+ )
10963+ .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
10964+ };
10965+
1095710966 self.pay_for_offer_intern(
1095810967 offer,
1095910968 quantity,
1096010969 amount_msats,
1096110970 payer_note,
1096210971 payment_id,
1096310972 None,
10964- |invoice_request, nonce| {
10965- let expiration = StaleExpiration::TimerTicks(1);
10966- let retryable_invoice_request = RetryableInvoiceRequest {
10967- invoice_request: invoice_request.clone(),
10968- nonce,
10969- needs_retry: true,
10970- };
10971- self.pending_outbound_payments
10972- .add_new_awaiting_invoice(
10973- payment_id,
10974- expiration,
10975- retry_strategy,
10976- route_params_config,
10977- Some(retryable_invoice_request),
10978- )
10979- .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
10980- },
10973+ create_pending_payment_fn,
1098110974 )
1098210975 }
1098310976
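
This hunk pulls a long inline closure out of the argument list and binds it first, so `pay_for_offer_intern` reads as a flat list of named arguments. A sketch of the shape; `pay` and its parameters are hypothetical, and the closure captures its environment just as `create_pending_payment_fn` above captures `self`, `retry_strategy`, and `route_params_config`:

```rust
fn pay<F>(amount_msats: u64, create_pending_payment: F) -> Result<(), String>
where
    F: FnOnce(u64) -> Result<(), String>,
{
    create_pending_payment(amount_msats)
}

fn main() {
    let payment_id = 42u64;

    // Captures `payment_id` from the enclosing scope.
    let create_pending_payment_fn = |amount: u64| {
        if amount == 0 {
            return Err(format!("payment {payment_id}: zero amount"));
        }
        Ok(())
    };

    assert!(pay(1_000, create_pending_payment_fn).is_ok());
}
```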
@@ -11281,9 +11274,8 @@ where
1128111274 }
1128211275
1128311276 fn get_peers_for_blinded_path(&self) -> Vec<MessageForwardNode> {
11284- self.per_peer_state
11285- .read()
11286- .unwrap()
11277+ let per_peer_state = self.per_peer_state.read().unwrap();
11278+ per_peer_state
1128711279 .iter()
1128811280 .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
1128911281 .filter(|(_, peer)| peer.is_connected)
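
Binding the read guard to a local makes the lock's lifetime explicit instead of hiding it in a temporary at the head of a method chain. A sketch of the same refactor, with a simplified peer map standing in for LDK's `per_peer_state`:

```rust
use std::sync::RwLock;

fn connected_peers(per_peer_state: &RwLock<Vec<(u64, bool)>>) -> Vec<u64> {
    // The named guard holds the read lock for the rest of the scope.
    let per_peer_state = per_peer_state.read().unwrap();
    per_peer_state
        .iter()
        .filter(|(_, is_connected)| *is_connected)
        .map(|(node_id, _)| *node_id)
        .collect()
}

fn main() {
    let state = RwLock::new(vec![(1, true), (2, false), (3, true)]);
    assert_eq!(connected_peers(&state), vec![1, 3]);
}
```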
@@ -12103,13 +12095,10 @@ where
1210312095 self.do_chain_event(None, |channel| {
1210412096 if let Some(funding_txo) = channel.funding.get_funding_txo() {
1210512097 if funding_txo.txid == *txid {
12106- channel
12107- .funding_transaction_unconfirmed(&&WithChannelContext::from(
12108- &self.logger,
12109- &channel.context,
12110- None,
12111- ))
12112- .map(|()| (None, Vec::new(), None))
12098+ let chan_context =
12099+ WithChannelContext::from(&self.logger, &channel.context, None);
12100+ let res = channel.funding_transaction_unconfirmed(&&chan_context);
12101+ res.map(|()| (None, Vec::new(), None))
1211312102 } else {
1211412103 Ok((None, Vec::new(), None))
1211512104 }
@@ -12436,13 +12425,13 @@ where
1243612425 MR::Target: MessageRouter,
1243712426 L::Target: Logger,
1243812427{
12439- fn handle_open_channel(&self, counterparty_node_id: PublicKey, msg : &msgs::OpenChannel) {
12428+	fn handle_open_channel(&self, counterparty_node_id: PublicKey, message: &msgs::OpenChannel) {
1244012429 // Note that we never need to persist the updated ChannelManager for an inbound
1244112430 // open_channel message - pre-funded channels are never written so there should be no
1244212431 // change to the contents.
1244312432 let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
12444- let res =
12445-				self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V1(msg));
12433+ let msg = OpenChannelMessageRef::V1(message);
12434+ let res = self.internal_open_channel(&counterparty_node_id, msg);
1244612435 let persist = match &res {
1244712436 Err(e) if e.closes_channel() => {
1244812437 debug_assert!(false, "We shouldn't close a new channel");
@@ -12951,16 +12940,10 @@ where
1295112940 {
1295212941 let RetryableInvoiceRequest { invoice_request, nonce, .. } = retryable_invoice_request;
1295312942
12954- if self
12955- .flow
12956- .enqueue_invoice_request(
12957- invoice_request,
12958- payment_id,
12959- nonce,
12960- self.get_peers_for_blinded_path(),
12961- )
12962- .is_err()
12963- {
12943+ let peers = self.get_peers_for_blinded_path();
12944+ let enqueue_invreq_res =
12945+ self.flow.enqueue_invoice_request(invoice_request, payment_id, nonce, peers);
12946+ if enqueue_invreq_res.is_err() {
1296412947 log_warn!(
1296512948 self.logger,
1296612949 "Retry failed for invoice request with payment_id {}",
@@ -14069,11 +14052,9 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
1406914052 fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
1407014053 let len: u64 = Readable::read(reader)?;
1407114054 const MAX_ALLOC_SIZE: u64 = 1024 * 16;
14072- let mut events: Self = VecDeque::with_capacity(cmp::min(
14073- MAX_ALLOC_SIZE
14074- / mem::size_of::<(events::Event, Option<EventCompletionAction>)>() as u64,
14075- len,
14076- ) as usize);
14055+ let event_size = mem::size_of::<(events::Event, Option<EventCompletionAction>)>();
14056+ let mut events: Self =
14057+ VecDeque::with_capacity(cmp::min(MAX_ALLOC_SIZE / event_size as u64, len) as usize);
1407714058 for _ in 0..len {
1407814059 let ev_opt = MaybeReadable::read(reader)?;
1407914060 let action = Readable::read(reader)?;
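
The capacity computation here is the standard defense against attacker-controlled lengths in deserialization: reserve at most `MAX_ALLOC_SIZE` bytes up front and let the deque grow organically past that, so a hostile `len` read from the wire cannot force a huge allocation. A sketch with a stand-in `Event` type:

```rust
use std::cmp;
use std::collections::VecDeque;
use std::mem;

// `Event` is a hypothetical 64-byte stand-in for the real event tuple.
type Event = [u8; 64];

fn preallocate(len: u64) -> VecDeque<Event> {
    const MAX_ALLOC_SIZE: u64 = 1024 * 16;
    let event_size = mem::size_of::<Event>();
    VecDeque::with_capacity(cmp::min(MAX_ALLOC_SIZE / event_size as u64, len) as usize)
}

fn main() {
    // A hostile `len` of u64::MAX still reserves at most 16 KiB worth.
    let events = preallocate(u64::MAX);
    assert!(events.capacity() >= 256);
}
```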