 use crate::chain::ChannelMonitorUpdateStatus;
+use crate::events::Event;
 use crate::events::HTLCDestination;
 use crate::events::MessageSendEvent;
 use crate::events::MessageSendEventsProvider;
@@ -338,3 +339,128 @@ fn test_quiescence_waits_for_monitor_update_complete() {
 	expect_payment_sent(&nodes[0], preimage, None, true, true);
 	expect_payment_claimed!(&nodes[1], payment_hash, payment_amount);
 }
+
+#[test]
+fn test_quiescence_updates_go_to_holding_cell() {
+	quiescence_updates_go_to_holding_cell(false);
+	quiescence_updates_go_to_holding_cell(true);
+}
+
+fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) {
+	// Test that any updates made to a channel while quiescent go to the holding cell.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+	let node_id_0 = nodes[0].node.get_our_node_id();
+	let node_id_1 = nodes[1].node.get_our_node_id();
+
+	// Send enough to be able to pay from both directions.
+	let payment_amount = 1_000_000;
+	send_payment(&nodes[0], &[&nodes[1]], payment_amount * 4);
+
+	// Propose quiescence from nodes[1], and immediately try to send a payment. Since its `stfu` has
+	// already gone out first, the outbound HTLC will go into the holding cell.
+	nodes[1].node.maybe_propose_quiescence(&node_id_0, &chan_id).unwrap();
+	let stfu = get_event_msg!(&nodes[1], MessageSendEvent::SendStfu, node_id_0);
+
+	let (route1, payment_hash1, payment_preimage1, payment_secret1) =
+		get_route_and_payment_hash!(&nodes[1], &nodes[0], payment_amount);
+	let onion1 = RecipientOnionFields::secret_only(payment_secret1);
+	let payment_id1 = PaymentId(payment_hash1.0);
+	nodes[1].node.send_payment_with_route(route1, payment_hash1, onion1, payment_id1).unwrap();
+	check_added_monitors!(&nodes[1], 0);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Send a payment in the opposite direction. Since nodes[0] hasn't sent its own `stfu` yet, it's
+	// allowed to make updates.
+	let (route2, payment_hash2, payment_preimage2, payment_secret2) =
+		get_route_and_payment_hash!(&nodes[0], &nodes[1], payment_amount);
+	let onion2 = RecipientOnionFields::secret_only(payment_secret2);
+	let payment_id2 = PaymentId(payment_hash2.0);
+	nodes[0].node.send_payment_with_route(route2, payment_hash2, onion2, payment_id2).unwrap();
+	check_added_monitors!(&nodes[0], 1);
+
+	let update_add = get_htlc_update_msgs!(&nodes[0], node_id_1);
+	nodes[1].node.handle_update_add_htlc(node_id_0, &update_add.update_add_htlcs[0]);
+	commitment_signed_dance!(&nodes[1], &nodes[0], update_add.commitment_signed, false);
+	expect_pending_htlcs_forwardable!(&nodes[1]);
+	expect_payment_claimable!(nodes[1], payment_hash2, payment_secret2, payment_amount);
+
+	// Have nodes[1] attempt to fail/claim nodes[0]'s payment. Since nodes[1] already sent out
+	// `stfu`, the `update_fail/fulfill` will go into the holding cell.
+	if fail_htlc {
+		nodes[1].node.fail_htlc_backwards(&payment_hash2);
+		let failed_payment = HTLCDestination::FailedPayment { payment_hash: payment_hash2 };
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![failed_payment]);
+	} else {
+		nodes[1].node.claim_funds(payment_preimage2);
+		check_added_monitors(&nodes[1], 1);
+	}
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Finish the quiescence handshake.
+	nodes[0].node.handle_stfu(node_id_1, &stfu);
+	let stfu = get_event_msg!(&nodes[0], MessageSendEvent::SendStfu, node_id_1);
+	nodes[1].node.handle_stfu(node_id_0, &stfu);
+
+	nodes[0].node.exit_quiescence(&node_id_1, &chan_id).unwrap();
+	nodes[1].node.exit_quiescence(&node_id_0, &chan_id).unwrap();
+
+	// Now that quiescence is over, nodes are allowed to make updates again. nodes[1] will have its
+	// outbound HTLC finally go out, along with the fail/claim of nodes[0]'s payment.
+	let update = get_htlc_update_msgs!(&nodes[1], node_id_0);
+	check_added_monitors(&nodes[1], 1);
+	nodes[0].node.handle_update_add_htlc(node_id_1, &update.update_add_htlcs[0]);
+	if fail_htlc {
+		nodes[0].node.handle_update_fail_htlc(node_id_1, &update.update_fail_htlcs[0]);
+	} else {
+		nodes[0].node.handle_update_fulfill_htlc(node_id_1, &update.update_fulfill_htlcs[0]);
+	}
+	commitment_signed_dance!(&nodes[0], &nodes[1], update.commitment_signed, false);
+
+	if !fail_htlc {
+		expect_payment_claimed!(nodes[1], payment_hash2, payment_amount);
+	}
+
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 3);
+	assert!(events.iter().find(|e| matches!(e, Event::PendingHTLCsForwardable { .. })).is_some());
+	if fail_htlc {
+		assert!(events.iter().find(|e| matches!(e, Event::PaymentFailed { .. })).is_some());
+		assert!(events.iter().find(|e| matches!(e, Event::PaymentPathFailed { .. })).is_some());
+	} else {
+		assert!(events.iter().find(|e| matches!(e, Event::PaymentSent { .. })).is_some());
+		assert!(events.iter().find(|e| matches!(e, Event::PaymentPathSuccessful { .. })).is_some());
+		check_added_monitors(&nodes[0], 1);
+	}
+	nodes[0].node.process_pending_htlc_forwards();
+	expect_payment_claimable!(nodes[0], payment_hash1, payment_secret1, payment_amount);
+
+	if fail_htlc {
+		nodes[0].node.fail_htlc_backwards(&payment_hash1);
+		let failed_payment = HTLCDestination::FailedPayment { payment_hash: payment_hash1 };
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[0], vec![failed_payment]);
+	} else {
+		nodes[0].node.claim_funds(payment_preimage1);
+	}
+	check_added_monitors(&nodes[0], 1);
+
+	let update = get_htlc_update_msgs!(&nodes[0], node_id_1);
+	if fail_htlc {
+		nodes[1].node.handle_update_fail_htlc(node_id_0, &update.update_fail_htlcs[0]);
+	} else {
+		nodes[1].node.handle_update_fulfill_htlc(node_id_0, &update.update_fulfill_htlcs[0]);
+	}
+	commitment_signed_dance!(&nodes[1], &nodes[0], update.commitment_signed, false);
+
+	if fail_htlc {
+		let conditions = PaymentFailedConditions::new();
+		expect_payment_failed_conditions(&nodes[1], payment_hash1, true, conditions);
+	} else {
+		expect_payment_claimed!(nodes[0], payment_hash1, payment_amount);
+		expect_payment_sent(&nodes[1], payment_preimage1, None, true, true);
+	}
+}