
Commit

itest: fix multi hop tests
yyforyongyu committed Jul 4, 2024
1 parent 4067334 commit 918f8bb
Showing 4 changed files with 31 additions and 96 deletions.
2 changes: 1 addition & 1 deletion itest/lnd_coop_close_with_htlcs_test.go
@@ -120,7 +120,7 @@ func coopCloseWithHTLCs(ht *lntest.HarnessTest) {
ht.AssertTxInMempool(&closeTxid)

// Wait for it to get mined and finish tearing down.
ht.AssertStreamChannelCoopClosed(alice, chanPoint, false, closeClient)
ht.AssertStreamChannelCoopClosed(alice, chanPoint, closeClient)
}

// coopCloseWithHTLCsWithRestart also tests the coop close flow when an HTLC
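For orientation, here is a minimal sketch of how a test drives the assertion changed above; the function name and parameters are illustrative, and only the harness calls visible in this diff are assumed.

package itest

import (
	"github.com/btcsuite/btcd/chaincfg/chainhash"

	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lntest/node"
	"github.com/lightningnetwork/lnd/lntest/rpc"
)

// sketchAssertCoopCloseWithHTLCs mirrors the tail of the test above: the
// cooperative close has already been initiated (closeClient and closeTxid
// come from that earlier, elided step), so we only assert the closing tx
// is pending and then wait for it to confirm.
func sketchAssertCoopCloseWithHTLCs(ht *lntest.HarnessTest,
	alice *node.HarnessNode, chanPoint *lnrpc.ChannelPoint,
	closeClient rpc.CloseChanClient, closeTxid chainhash.Hash) {

	// The unconfirmed closing tx should already be in the mempool.
	ht.AssertTxInMempool(&closeTxid)

	// Wait for it to get mined and finish tearing down; only the
	// closing tx itself is expected in the mined block, hence no
	// `anchors` flag.
	ht.AssertStreamChannelCoopClosed(alice, chanPoint, closeClient)
}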
114 changes: 27 additions & 87 deletions itest/lnd_multi-hop_test.go
@@ -16,7 +16,6 @@ import (
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/node"
"github.com/lightningnetwork/lnd/lntest/rpc"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/lightningnetwork/lnd/routing"
@@ -332,27 +331,9 @@ func runMultiHopHtlcLocalTimeout(ht *lntest.HarnessTest,
// Assert that the HTLC timeout tx is now in the mempool.
ht.AssertOutpointInMempool(htlcTimeoutOutpoint)

// We now wait for 30 seconds to overcome the flake - there's a
// block race between contractcourt and sweeper, causing the
// sweep to be broadcast earlier.
//
// TODO(yy): remove this once `blockbeat` is in place.
numExpected := 1
err := wait.NoError(func() error {
mem := ht.GetRawMempool()
if len(mem) == 2 {
numExpected = 2
return nil
}

return fmt.Errorf("want %d, got %v in mempool: %v",
numExpected, len(mem), mem)
}, wait.DefaultTimeout)
ht.Logf("Checking mempool got: %v", err)

// Mine a block to trigger the sweep of his commit output and
// confirm his HTLC timeout sweep.
ht.MineBlocksAndAssertNumTxes(1, numExpected)
ht.MineBlocksAndAssertNumTxes(1, 1)

// For leased channels, we need to mine one more block to
// confirm Bob's commit output sweep.
@@ -1880,12 +1861,12 @@ func runMultiHopHtlcAggregation(ht *lntest.HarnessTest,
ht.MineEmptyBlocks(int(numBlocks))

// Bob's force close transaction should now be found in the mempool. If
// there are anchors, we expect it to be offered to Bob's sweeper.
ht.AssertNumTxsInMempool(1)
// there are anchors, we expect it to be offered to Bob's sweeper and
// being swept.
ht.AssertNumTxsInMempool(2)

// Bob has two anchor sweep requests, one for remote (invalid) and the
// other for local.
ht.AssertNumPendingSweeps(bob, 2)
// Bob has one sweep request for his local anchor.
ht.AssertNumPendingSweeps(bob, 1)

closeTx := ht.AssertOutpointInMempool(
ht.OutPointFromChannelPoint(bobChanPoint),
@@ -1921,11 +1902,9 @@ func runMultiHopHtlcAggregation(ht *lntest.HarnessTest,
// Once bob has force closed, we can restart carol.
require.NoError(ht, restartCarol())

// Mine a block to confirm the closing transaction.
ht.MineBlocksAndAssertNumTxes(1, 1)

// The above mined block will trigger Bob to sweep his anchor output.
ht.AssertNumTxsInMempool(1)
// Mine a block to confirm the closing transaction and the anchor
// sweeping tx.
ht.MineBlocksAndAssertNumTxes(1, 2)

// Let Alice settle her invoices. When Bob now gets the preimages, he
// has no other option than to broadcast his second-level transactions
@@ -1942,7 +1921,7 @@ func runMultiHopHtlcAggregation(ht *lntest.HarnessTest,
// preimages from Alice. We also expect Carol to sweep her commitment
// output.
case lnrpc.CommitmentType_LEGACY:
ht.AssertNumPendingSweeps(bob, numInvoices*2+1)
ht.AssertNumPendingSweeps(bob, numInvoices*2)
ht.AssertNumPendingSweeps(carol, 1)

expectedTxes = 2*numInvoices + 1
Expand All @@ -1956,26 +1935,25 @@ func runMultiHopHtlcAggregation(ht *lntest.HarnessTest,
lnrpc.CommitmentType_SIMPLE_TAPROOT:

// Bob should have `numInvoices` for both HTLC success and
// timeout txns, plus one anchor sweep.
ht.AssertNumPendingSweeps(bob, numInvoices*2+1)
// timeout txns.
ht.AssertNumPendingSweeps(bob, numInvoices*2)

// Mine one block to trigger the sweep of HTLC success.
ht.MineEmptyBlocks(1)

// Carol should have commit and anchor outputs.
ht.AssertNumPendingSweeps(carol, 2)

// We expect to see three sweeping txns:
// 1. Bob's sweeping tx for all timeout HTLCs.
// 2. Bob's sweeping tx for all success HTLCs.
// 3. Carol's sweeping tx for her commit and anchor outputs.
// 3. Carol's sweeping tx for her commit output.
expectedTxes = 3

default:
ht.Fatalf("unhandled commitment type %v", c)
}

// Mine a block to confirm Bob's anchor sweeping, which will also
// trigger his sweeper to sweep HTLCs.
ht.MineBlocksAndAssertNumTxes(1, 1)

// Assert the sweeping txns are found in the mempool.
txes := ht.GetNumTxsFromMempool(expectedTxes)

@@ -2044,9 +2022,6 @@ func runMultiHopHtlcAggregation(ht *lntest.HarnessTest,
// Assert the tx has been offered to the sweeper.
ht.AssertNumPendingSweeps(bob, 1)

// Mine one block to trigger the sweep.
ht.MineEmptyBlocks(1)

// Find the commitment sweep.
bobCommitSweep := ht.GetNumTxsFromMempool(1)[0]
ht.AssertTxSpendFrom(bobCommitSweep, closeTxid)
@@ -2074,11 +2049,11 @@ func runMultiHopHtlcAggregation(ht *lntest.HarnessTest,
case lnrpc.CommitmentType_LEGACY:
ht.MineBlocksAndAssertNumTxes(2, 1)

// Mining one additional block, Bob's second level tx is mature, and he
// can sweep the output. Before the blocks are mined, we should expect
// to see Bob's commit sweep in the mempool.
// Mining two additional blocks, Bob's second level tx is mature, and
// he can sweep the output. Before the blocks are mined, we should
// expect to see Bob's commit sweep in the mempool.
case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT:
ht.MineBlocksAndAssertNumTxes(1, 1)
ht.MineBlocksAndAssertNumTxes(2, 1)

// Since Bob is the initiator of the Bob-Carol script-enforced leased
// channel, he incurs an additional CLTV when sweeping outputs back to
@@ -2095,47 +2070,24 @@ func runMultiHopHtlcAggregation(ht *lntest.HarnessTest,
height := ht.CurrentHeight()
bob.AddToLogf("itest: now mine %d blocks at height %d",
numBlocks, height)
ht.MineEmptyBlocks(int(numBlocks) - 1)
ht.MineEmptyBlocks(int(numBlocks))

default:
ht.Fatalf("unhandled commitment type %v", c)
}

// Make sure Bob's sweeper has received all the sweeping requests.
ht.AssertNumPendingSweeps(bob, numInvoices*2)

// Mine one block to trigger the sweeps.
ht.MineEmptyBlocks(1)

//
// For leased channels, Bob's commit output will mature after the above
// block.
if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
ht.AssertNumPendingSweeps(bob, numInvoices*2+1)
} else {
ht.AssertNumPendingSweeps(bob, numInvoices*2)
}

// We now wait for 30 seconds to overcome the flake - there's a block
// race between contractcourt and sweeper, causing the sweep to be
// broadcast earlier.
//
// TODO(yy): remove this once `blockbeat` is in place.
numExpected := 1
err := wait.NoError(func() error {
mem := ht.GetRawMempool()
if len(mem) == numExpected {
return nil
}

if len(mem) > 0 {
numExpected = len(mem)
}

return fmt.Errorf("want %d, got %v in mempool: %v", numExpected,
len(mem), mem)
}, wait.DefaultTimeout)
ht.Logf("Checking mempool got: %v", err)

// Make sure it spends from the second level tx.
secondLevelSweep := ht.GetNumTxsFromMempool(numExpected)[0]
secondLevelSweep := ht.GetNumTxsFromMempool(1)[0]
bobSweep := secondLevelSweep.TxHash()

// It should be sweeping all the second-level outputs.
@@ -2154,26 +2106,14 @@ func runMultiHopHtlcAggregation(ht *lntest.HarnessTest,
}
}

// TODO(yy): bring the following check back when `blockbeat` is in
// place - atm we may have two sweeping transactions in the mempool.
// require.Equal(ht, 2*numInvoices, secondLvlSpends)
require.Equal(ht, 2*numInvoices, secondLvlSpends)

// When we mine one additional block, that will confirm Bob's second
// level sweep. Now Bob should have no pending channels anymore, as
// this just resolved it by the confirmation of the sweep transaction.
block := ht.MineBlocksAndAssertNumTxes(1, numExpected)[0]
block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
ht.AssertTxInBlock(block, &bobSweep)

// For leased channels, we need to mine one more block to confirm Bob's
// commit output sweep.
//
// NOTE: we mine this block conditionally, as the commit output may
// have already been swept one block earlier due to the race in block
// consumption among subsystems.
pendingChanResp := bob.RPC.PendingChannels()
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
ht.MineBlocksAndAssertNumTxes(1, 1)
}
ht.AssertNumPendingForceClose(bob, 0)

// The channel with Alice is still open.
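To recap the anchor-channel expectations above in one place, here is an illustrative sketch (the function name is made up; only assertions appearing in this diff are used) of the deterministic pattern the test now relies on instead of polling the mempool with wait.NoError.

package itest

import (
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lntest/node"
)

// sketchForceCloseSweepAssertions outlines the updated expectations after
// Bob force closes an anchor channel: the force-close tx and his local
// anchor sweep show up together, so exact counts can be asserted without
// a retry loop.
func sketchForceCloseSweepAssertions(ht *lntest.HarnessTest,
	bob *node.HarnessNode) {

	// Both the force-close tx and the anchor sweep are expected in
	// the mempool.
	ht.AssertNumTxsInMempool(2)

	// Only Bob's local anchor output is offered to his sweeper.
	ht.AssertNumPendingSweeps(bob, 1)

	// A single block confirms both transactions.
	ht.MineBlocksAndAssertNumTxes(1, 2)
}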
2 changes: 1 addition & 1 deletion lntest/harness.go
@@ -1321,7 +1321,7 @@ func (h *HarnessTest) CloseChannel(hn *node.HarnessNode,

stream, _ := h.CloseChannelAssertPending(hn, cp, false)

return h.AssertStreamChannelCoopClosed(hn, cp, false, stream)
return h.AssertStreamChannelCoopClosed(hn, cp, stream)
}

// ForceCloseChannel attempts to force close a non-anchored channel identified
9 changes: 2 additions & 7 deletions lntest/harness_assertion.go
@@ -633,8 +633,7 @@ func (h *HarnessTest) AssertNumPendingForceClose(hn *node.HarnessNode,
// - assert the node has zero waiting close channels.
// - assert the node has seen the channel close update.
func (h *HarnessTest) AssertStreamChannelCoopClosed(hn *node.HarnessNode,
cp *lnrpc.ChannelPoint, anchors bool,
stream rpc.CloseChanClient) *chainhash.Hash {
cp *lnrpc.ChannelPoint, stream rpc.CloseChanClient) *chainhash.Hash {

// Assert the channel is waiting close.
resp := h.AssertChannelWaitingClose(hn, cp)
@@ -647,11 +646,7 @@ func (h *HarnessTest) AssertStreamChannelCoopClosed(hn *node.HarnessNode,
// We'll now, generate a single block, wait for the final close status
// update, then ensure that the closing transaction was included in the
// block. If there are anchors, we also expect an anchor sweep.
expectedTxes := 1
if anchors {
expectedTxes = 2
}
block := h.MineBlocksAndAssertNumTxes(1, expectedTxes)[0]
block := h.MineBlocksAndAssertNumTxes(1, 1)[0]

// Consume one close event and assert the closing txid can be found in
// the block.
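With the `anchors` flag gone, a cooperative close is always expected to confirm as a single transaction. The sketch below (illustrative; it simply mirrors the CloseChannel wrapper changed in this commit) shows how a caller obtains the confirmed closing txid.

package lntest

import (
	"github.com/btcsuite/btcd/chaincfg/chainhash"

	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lntest/node"
)

// sketchCoopCloseTxid initiates a cooperative close, then lets
// AssertStreamChannelCoopClosed mine one block, expect only the closing
// tx in it, and return the confirmed txid.
func sketchCoopCloseTxid(h *HarnessTest, hn *node.HarnessNode,
	cp *lnrpc.ChannelPoint) *chainhash.Hash {

	// Start a non-force close and keep the close status stream.
	stream, _ := h.CloseChannelAssertPending(hn, cp, false)

	// Exactly one tx (the closing tx) is expected in the mined block.
	return h.AssertStreamChannelCoopClosed(hn, cp, stream)
}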
