Commit a06ebee

Stop enqueueing error messages for disconnected peers
If a channel is failed while a peer is disconnected, we'll still have a `PeerState` for that peer. Historically, we haven't bothered to check whether the peer is actually connected before pushing the `error` message onto the `PeerState::pending_msg_events` queue, leaving us sending messages into the void. This is generally not an issue, as `ChannelManager::get_and_clear_pending_msg_events` should be called very regularly, removing these messages and then dropping them, since `PeerManager` won't have anything to do with them. Further, when the message is an `error`, if a peer happens to connect between when we push the message and when `get_and_clear_pending_msg_events` is called, the worst that happens is they get the `error` message we'd end up sending them anyway when they try to reestablish the channel. Still, it's awkward to leave `error`s lying around in a message queue for a disconnected peer, so we stop enqueueing them here.
Parent commit: 92e79ab
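The substantive change is the `handle_error!` hunk in `lightning/src/ln/channelmanager.rs` below: an error `MessageSendEvent` is only queued when the peer's `PeerState` reports it as connected. A minimal sketch of that idea, using simplified stand-in types (`PeerState`, `MessageSendEvent`, and `enqueue_error` here are illustrative models, not LDK's actual definitions):

use std::collections::HashMap;

// Simplified stand-ins for LDK's per-peer bookkeeping; the real
// `PeerState` and `MessageSendEvent` types carry far more data.
#[derive(Debug)]
enum MessageSendEvent {
	HandleError { err: String },
}

struct PeerState {
	is_connected: bool,
	pending_msg_events: Vec<MessageSendEvent>,
}

/// Queue an error for a peer only if it is currently connected. For a
/// disconnected peer the message is simply dropped: the peer would get an
/// equivalent `error` anyway when it reconnects and tries to reestablish
/// the now-closed channel.
fn enqueue_error(per_peer_state: &mut HashMap<[u8; 33], PeerState>, peer: [u8; 33], err: String) {
	if let Some(peer_state) = per_peer_state.get_mut(&peer) {
		if peer_state.is_connected {
			peer_state.pending_msg_events.push(MessageSendEvent::HandleError { err });
		}
	}
}

fn main() {
	let peer_id = [2u8; 33];
	let mut peers = HashMap::new();
	peers.insert(peer_id, PeerState { is_connected: false, pending_msg_events: Vec::new() });

	// Disconnected: nothing is queued, so no message sits around "in the void".
	enqueue_error(&mut peers, peer_id, "channel force-closed".to_owned());
	assert!(peers[&peer_id].pending_msg_events.is_empty());

	// Connected: the error is queued for the next message-event flush.
	peers.get_mut(&peer_id).unwrap().is_connected = true;
	enqueue_error(&mut peers, peer_id, "channel force-closed".to_owned());
	assert_eq!(peers[&peer_id].pending_msg_events.len(), 1);
}

The test-suite changes below follow from this: tests that force-close or confirm a commitment transaction while the relevant peer is disconnected no longer expect an `error` message in the broadcast check.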

6 files changed, 28 insertions(+), 16 deletions(-)

lightning/src/ln/chanmon_update_fail_tests.rs (3 additions, 3 deletions)

@@ -3897,7 +3897,7 @@ fn do_test_durable_preimages_on_closed_channel(
 		}
 	}
 	if !close_chans_before_reload {
-		check_closed_broadcast(&nodes[1], 1, true);
+		check_closed_broadcast(&nodes[1], 1, false);
 		let reason = ClosureReason::CommitmentTxConfirmed;
 		check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000);
 	}
@@ -3914,7 +3914,7 @@ fn do_test_durable_preimages_on_closed_channel(
 	check_spends!(bs_preimage_tx, as_closing_tx[0]);

 	mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
-	check_closed_broadcast(&nodes[0], 1, true);
+	check_closed_broadcast(&nodes[0], 1, false);
 	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);

 	if !close_chans_before_reload || close_only_a {
@@ -4063,7 +4063,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
 			Event::ChannelClosed { .. } => {},
 			_ => panic!(),
 		}
-		check_closed_broadcast!(nodes[1], true);
+		check_closed_broadcast(&nodes[1], 1, false);
 	}

 	// Once we run event processing the monitor should free, check that it was indeed the B<->C

lightning/src/ln/channelmanager.rs (4 additions, 2 deletions)

@@ -3228,7 +3228,9 @@ macro_rules! handle_error {
 				let per_peer_state = $self.per_peer_state.read().unwrap();
 				if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
 					let mut peer_state = peer_state_mutex.lock().unwrap();
-					peer_state.pending_msg_events.push(msg_event);
+					if peer_state.is_connected {
+						peer_state.pending_msg_events.push(msg_event);
+					}
 				}
 			}

@@ -18808,7 +18810,7 @@ mod tests {
 			.node
 			.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone())
 			.unwrap();
-		check_closed_broadcast(&nodes[0], 1, true);
+		check_closed_broadcast(&nodes[0], 1, false);
 		check_added_monitors(&nodes[0], 1);
 		let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 		check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 100000);

lightning/src/ln/functional_tests.rs (1 addition, 1 deletion)

@@ -8023,7 +8023,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 		.force_close_broadcasting_latest_txn(&channel_id, &node_c_id, message.clone())
 		.unwrap();

-	check_closed_broadcast!(nodes[1], true);
+	check_closed_broadcast(&nodes[1], 1, false);
 	let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 	check_closed_event!(nodes[1], 1, reason, [node_c_id], 100000);
 	check_added_monitors(&nodes[1], 1);

lightning/src/ln/monitor_tests.rs (5 additions, 5 deletions)

@@ -3044,7 +3044,7 @@ fn do_test_anchors_monitor_fixes_counterparty_payment_script_on_reload(confirm_c
 		let serialized_monitor = get_monitor!(nodes[1], chan_id).encode();
 		reload_node!(nodes[1], user_config, &nodes[1].node.encode(), &[&serialized_monitor], persister, chain_monitor, node_deserialized);
 		let commitment_tx_conf_height = block_from_scid(mine_transaction(&nodes[1], &commitment_tx));
-		check_closed_broadcast(&nodes[1], 1, true);
+		check_closed_broadcast(&nodes[1], 1, false);
 		check_added_monitors(&nodes[1], 1);
 		commitment_tx_conf_height
 	};
@@ -3407,7 +3407,7 @@ fn do_test_lost_preimage_monitor_events(on_counterparty_tx: bool) {
 	check_added_monitors(&nodes[2], 1);
 	let c_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 	check_closed_event!(nodes[2], 1, c_reason, [node_b_id], 1_000_000);
-	check_closed_broadcast!(nodes[2], true);
+	check_closed_broadcast(&nodes[2], 1, false);

 	handle_bump_events(&nodes[2], true, 0);
 	let cs_commit_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
@@ -3421,7 +3421,7 @@ fn do_test_lost_preimage_monitor_events(on_counterparty_tx: bool) {
 	check_added_monitors(&nodes[1], 1);
 	let b_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 	check_closed_event!(nodes[1], 1, b_reason, [node_c_id], 1_000_000);
-	check_closed_broadcast!(nodes[1], true);
+	check_closed_broadcast(&nodes[1], 1, false);

 	handle_bump_events(&nodes[1], true, 0);
 	let bs_commit_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
@@ -3618,7 +3618,7 @@ fn do_test_lost_timeout_monitor_events(confirm_tx: CommitmentType, dust_htlcs: b
 	check_added_monitors(&nodes[2], 1);
 	let c_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 	check_closed_event!(nodes[2], 1, c_reason, [node_b_id], 1_000_000);
-	check_closed_broadcast!(nodes[2], true);
+	check_closed_broadcast(&nodes[2], 1, false);

 	handle_bump_events(&nodes[2], true, 0);
 	let cs_commit_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
@@ -3632,7 +3632,7 @@ fn do_test_lost_timeout_monitor_events(confirm_tx: CommitmentType, dust_htlcs: b
 	check_added_monitors(&nodes[1], 1);
 	let b_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 	check_closed_event!(nodes[1], 1, b_reason, [node_c_id], 1_000_000);
-	check_closed_broadcast!(nodes[1], true);
+	check_closed_broadcast(&nodes[1], 1, false);

 	handle_bump_events(&nodes[1], true, 0);
 	let bs_commit_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);

lightning/src/ln/payment_tests.rs (1 addition, 1 deletion)

@@ -1287,7 +1287,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(
 	expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);

 	mine_transaction(&nodes[1], &commitment_tx);
-	check_closed_broadcast!(nodes[1], true);
+	check_closed_broadcast(&nodes[1], 1, false);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000);
 	let htlc_success_tx = {

lightning/src/ln/reorg_tests.rs (14 additions, 4 deletions)

@@ -325,6 +325,16 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
 		let chan_0_monitor_serialized = get_monitor!(nodes[0], chan.2).encode();

 		reload_node!(nodes[0], nodes[0].node.get_current_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
+
+		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
+
+		if reorg_after_reload {
+			// If we haven't yet closed the channel, reconnect the peers so that nodes[0] will
+			// generate an error message we can handle below.
+			let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+			reconnect_args.send_channel_ready = (true, true);
+			reconnect_nodes(reconnect_args);
+		}
 	}

 	if reorg_after_reload {
@@ -387,7 +397,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
 		[nodes[1].node.get_our_node_id()], 100000);

 	// Now check that we can create a new channel
-	if reload_node {
+	if reload_node && !reorg_after_reload {
 		// If we dropped the channel before reloading the node, nodes[1] was also dropped from
 		// nodes[0] storage, and hence not connected again on startup. We therefore need to
 		// reconnect to the node before attempting to create a new channel.
@@ -879,7 +889,7 @@ fn do_test_retries_own_commitment_broadcast_after_reorg(anchors: bool, revoked_c
 		.node
 		.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id(), message.clone())
 		.unwrap();
-	check_closed_broadcast(&nodes[1], 1, true);
+	check_closed_broadcast(&nodes[1], 1, !revoked_counterparty_commitment);
 	check_added_monitors(&nodes[1], 1);
 	let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 	check_closed_event(&nodes[1], 1, reason, false, &[nodes[0].node.get_our_node_id()], 100_000);
@@ -1002,7 +1012,7 @@ fn do_test_split_htlc_expiry_tracking(use_third_htlc: bool, reorg_out: bool) {
 	// Force-close and fetch node B's commitment transaction and the transaction claiming the first
 	// two HTLCs.
 	nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &node_a_id, err).unwrap();
-	check_closed_broadcast(&nodes[1], 1, true);
+	check_closed_broadcast(&nodes[1], 1, false);
 	check_added_monitors(&nodes[1], 1);
 	let message = "Channel force-closed".to_owned();
 	let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
@@ -1015,7 +1025,7 @@ fn do_test_split_htlc_expiry_tracking(use_third_htlc: bool, reorg_out: bool) {
 	check_spends!(commitment_tx, funding_tx);

 	mine_transaction(&nodes[0], &commitment_tx);
-	check_closed_broadcast(&nodes[0], 1, true);
+	check_closed_broadcast(&nodes[0], 1, false);
 	let reason = ClosureReason::CommitmentTxConfirmed;
 	check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 10_000_000);
 	check_added_monitors(&nodes[0], 1);
