Skip to content

Commit 677f2c3

Browse files
committed
lntest: re-define CleanupForceClose
To reflect the new sweeping behavior. This also makes it easier to use, as we need a method to quickly clean up force closes without concerning ourselves with the details when we are not testing the force close behavior.
1 parent 7aba5cb commit 677f2c3

File tree

2 files changed

+16
-32
lines changed

2 files changed

+16
-32
lines changed

lntest/harness.go

Lines changed: 4 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -1626,31 +1626,14 @@ func (h *HarnessTest) OpenChannelPsbt(srcNode, destNode *node.HarnessNode,
16261626
return respStream, upd.PsbtFund.Psbt
16271627
}
16281628

1629-
// CleanupForceClose mines a force close commitment found in the mempool and
1630-
// the following sweep transaction from the force closing node.
1629+
// CleanupForceClose mines blocks to clean up the force close process. This is
1630+
// used for tests that are not asserting the expected behavior is found during
1631+
// the force close process, e.g., num of sweeps, etc. Instead, it provides a
1632+
// shortcut to move the test forward with a clean mempool.
16311633
func (h *HarnessTest) CleanupForceClose(hn *node.HarnessNode) {
16321634
// Wait for the channel to be marked pending force close.
16331635
h.AssertNumPendingForceClose(hn, 1)
16341636

1635-
// Mine enough blocks for the node to sweep its funds from the force
1636-
// closed channel. The commit sweep resolver is able to offer the input
1637-
// to the sweeper at defaulCSV-1, and broadcast the sweep tx once one
1638-
// more block is mined.
1639-
//
1640-
// NOTE: we might empty blocks here as we don't know the exact number
1641-
// of blocks to mine. This may end up mining more blocks than needed.
1642-
h.MineEmptyBlocks(node.DefaultCSV - 1)
1643-
1644-
// Assert there is one pending sweep.
1645-
h.AssertNumPendingSweeps(hn, 1)
1646-
1647-
// Mine a block to trigger the sweep.
1648-
h.MineEmptyBlocks(1)
1649-
1650-
// The node should now sweep the funds, clean up by mining the sweeping
1651-
// tx.
1652-
h.MineBlocksAndAssertNumTxes(1, 1)
1653-
16541637
// Mine blocks to get any second level HTLC resolved. If there are no
16551638
// HTLCs, this will behave like h.AssertNumPendingCloseChannels.
16561639
h.mineTillForceCloseResolved(hn)

lntest/harness_miner.go

Lines changed: 12 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -167,24 +167,25 @@ func (h *HarnessTest) cleanMempool() {
167167
}
168168

169169
// mineTillForceCloseResolved asserts that the number of pending close channels
170-
// are zero. Each time it checks, a new block is mined using MineBlocksSlow to
171-
// give the node some time to catch up the chain.
172-
//
173-
// NOTE: this method is a workaround to make sure we have a clean mempool at
174-
// the end of a channel force closure. We cannot directly mine blocks and
175-
// assert channels being fully closed because the subsystems in lnd don't share
176-
// the same block height. This is especially the case when blocks are produced
177-
// too fast.
178-
// TODO(yy): remove this workaround when syncing blocks are unified in all the
179-
// subsystems.
170+
// are zero. Each time it checks, an empty block is mined, followed by a
171+
// mempool check to see if there are any sweeping txns. If found, these txns
172+
// are then mined to clean up the mempool.
180173
func (h *HarnessTest) mineTillForceCloseResolved(hn *node.HarnessNode) {
181174
_, startHeight := h.GetBestBlock()
182175

183176
err := wait.NoError(func() error {
184177
resp := hn.RPC.PendingChannels()
185178
total := len(resp.PendingForceClosingChannels)
186179
if total != 0 {
187-
h.MineBlocks(1)
180+
// Mine an empty block first.
181+
h.MineEmptyBlocks(1)
182+
183+
// If there are new sweeping txns, mine a block to
184+
// confirm them.
185+
mem := h.GetRawMempool()
186+
if len(mem) != 0 {
187+
h.MineBlocksAndAssertNumTxes(1, len(mem))
188+
}
188189

189190
return fmt.Errorf("expected num of pending force " +
190191
"close channel to be zero")

0 commit comments

Comments
 (0)