diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 0f101f866d..ceeeaaeb16 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -34,204 +34,6 @@ env:
GO_VERSION: 1.22.6
jobs:
- ########################
- # SQLC code gen check
- ########################
- sqlc-check:
- name: Sqlc check
- runs-on: ubuntu-latest
- steps:
- - name: git checkout
- uses: actions/checkout@v3
-
- - name: setup go ${{ env.GO_VERSION }}
- uses: ./.github/actions/setup-go
- with:
- go-version: '${{ env.GO_VERSION }}'
-
- - name: docker image cache
- uses: satackey/action-docker-layer-caching@v0.0.11
- # Ignore the failure of a step and avoid terminating the job.
- continue-on-error: true
-
- - name: Generate sql models
- run: make sqlc-check
-
- ########################
- # RPC and mobile compilation check
- ########################
- rpc-check:
- name: RPC and mobile compilation check
- runs-on: ubuntu-latest
- steps:
- - name: git checkout
- uses: actions/checkout@v3
-
- - name: setup go ${{ env.GO_VERSION }}
- uses: ./.github/actions/setup-go
- with:
- go-version: '${{ env.GO_VERSION }}'
-
- - name: run check
- run: make rpc-check
-
- - name: run JSON/WASM stub compilation check
- run: make rpc-js-compile
-
- - name: build mobile RPC bindings
- run: make mobile-rpc
-
- - name: build mobile specific code
- run: go build --tags="mobile" ./mobile
-
- ########################
- # check commits
- ########################
- check-commits:
- if: github.event_name == 'pull_request'
- name: check commits
- runs-on: ubuntu-latest
- steps:
- - name: git checkout
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
-
- - name: setup go ${{ env.GO_VERSION }}
- uses: ./.github/actions/setup-go
- with:
- go-version: '${{ env.GO_VERSION }}'
-
- - name: fetch and rebase on ${{ github.base_ref }}
- uses: ./.github/actions/rebase
-
- - name: check commits
- run: scripts/check-each-commit.sh upstream/${{ github.base_ref }}
-
- ########################
- # lint code
- ########################
- lint:
- name: lint code
- runs-on: ubuntu-latest
- steps:
- - name: git checkout
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
-
- - name: setup go ${{ env.GO_VERSION }}
- uses: ./.github/actions/setup-go
- with:
- go-version: '${{ env.GO_VERSION }}'
-
- - name: check code format
- run: make fmt-check
-
- - name: check go modules tidiness
- run: make tidy-module-check
-
- - name: lint proto files
- run: make protolint
-
- - name: lint
- run: GOGC=50 make lint
-
- ########################
- # cross compilation
- ########################
- cross-compile:
- name: cross compilation
- runs-on: ubuntu-latest
- steps:
- - name: git checkout
- uses: actions/checkout@v3
-
- - name: setup go ${{ env.GO_VERSION }}
- uses: ./.github/actions/setup-go
- with:
- go-version: '${{ env.GO_VERSION }}'
- key-prefix: cross-compile
-
- - name: build release for all architectures
- run: make release
-
- ########################
- # sample configuration check
- ########################
- sample-conf-check:
- name: sample configuration check
- runs-on: ubuntu-latest
- steps:
- - name: git checkout
- uses: actions/checkout@v3
-
- - name: setup go ${{ env.GO_VERSION }}
- uses: ./.github/actions/setup-go
- with:
- go-version: '${{ env.GO_VERSION }}'
-
- - name: check default values in sample-lnd.conf file
- run: make sample-conf-check
-
- ########################
- # run unit tests
- ########################
- unit-test:
- name: run unit tests
- runs-on: ubuntu-latest
- strategy:
- # Allow other tests in the matrix to continue if one fails.
- fail-fast: false
- matrix:
- unit_type:
- - btcd unit-cover
- - unit tags="kvdb_etcd"
- - unit tags="kvdb_postgres"
- - unit tags="kvdb_sqlite"
- - btcd unit-race
- - unit-module
-
- steps:
- - name: git checkout
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
-
- - name: fetch and rebase on ${{ github.base_ref }}
- if: github.event_name == 'pull_request'
- uses: ./.github/actions/rebase
-
- - name: git checkout fuzzing seeds
- uses: actions/checkout@v3
- with:
- repository: lightninglabs/lnd-fuzz
- path: lnd-fuzz
-
- - name: rsync fuzzing seeds
- run: rsync -a --ignore-existing lnd-fuzz/ ./
-
- - name: setup go ${{ env.GO_VERSION }}
- uses: ./.github/actions/setup-go
- with:
- go-version: '${{ env.GO_VERSION }}'
- key-prefix: unit-test
-
- - name: install bitcoind
- run: ./scripts/install_bitcoind.sh $BITCOIN_VERSION
-
- - name: run ${{ matrix.unit_type }}
- run: make ${{ matrix.unit_type }}
-
- - name: Send coverage
- uses: shogo82148/actions-goveralls@v1
- if: matrix.unit_type == 'btcd unit-cover'
- with:
- path-to-profile: coverage.txt
- flag-name: 'unit'
- parallel: true
-
-
########################
# run ubuntu integration tests
########################
@@ -284,7 +86,7 @@ jobs:
run: ./scripts/install_bitcoind.sh $BITCOIN_VERSION
- name: run ${{ matrix.name }}
- run: make itest-parallel tranches=${{ env.TRANCHES }} ${{ matrix.args }}
+ run: make itest-parallel tranches=${{ env.TRANCHES }} ${{ matrix.args }} shuffleseed=${{ github.run_id }}${{ strategy.job-index }}
- name: Send coverage
if: ${{ contains(matrix.args, 'cover=1') }}
@@ -332,7 +134,7 @@ jobs:
key-prefix: integration-test
- name: run itest
- run: make itest-parallel tranches=${{ env.TRANCHES }} windows=1
+ run: make itest-parallel tranches=${{ env.TRANCHES }} windows=1 shuffleseed=${{ github.run_id }}
- name: kill any remaining lnd processes
if: ${{ failure() }}
@@ -375,14 +177,8 @@ jobs:
go-version: '${{ env.GO_VERSION }}'
key-prefix: integration-test
- - name: install bitcoind
- run: |
- wget https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}.0/bitcoin-${BITCOIN_VERSION}.0-arm64-apple-darwin.tar.gz
- tar zxvf bitcoin-${BITCOIN_VERSION}.0-arm64-apple-darwin.tar.gz
- mv bitcoin-${BITCOIN_VERSION}.0 /tmp/bitcoin
-
- name: run itest
- run: PATH=$PATH:/tmp/bitcoin/bin make itest-parallel tranches=${{ env.TRANCHES }} backend=bitcoind
+ run: PATH=$PATH:/tmp/bitcoin/bin make itest-parallel tranches=${{ env.TRANCHES }} shuffleseed=${{ github.run_id }}
- name: Zip log files on failure
if: ${{ failure() }}
@@ -397,45 +193,10 @@ jobs:
path: logs-itest-macos.zip
retention-days: 5
- ########################
- # check pinned dependencies
- ########################
- dep-pin:
- name: check pinned dependencies
- runs-on: ubuntu-latest
- strategy:
- # Allow other tests in the matrix to continue if one fails.
- fail-fast: false
- matrix:
- pinned_dep:
- - google.golang.org/grpc v1.59.0
- - github.com/golang/protobuf v1.5.3
-
- steps:
- - name: git checkout
- uses: actions/checkout@v3
-
- - name: ensure dependencies at correct version
- run: if ! grep -q "${{ matrix.pinned_dep }}" go.mod; then echo dependency ${{ matrix.pinned_dep }} should not be altered ; exit 1 ; fi
-
- ########################
- # check PR updates release notes
- ########################
- milestone-check:
- name: check release notes updated
- runs-on: ubuntu-latest
- if: '!contains(github.event.pull_request.labels.*.name, ''no-changelog'')'
- steps:
- - name: git checkout
- uses: actions/checkout@v3
-
- - name: release notes check
- run: scripts/check-release-notes.sh
-
# Notify about the completion of all coverage collecting jobs.
finish:
if: ${{ always() }}
- needs: [unit-test, ubuntu-integration-test]
+ needs: [ubuntu-integration-test]
runs-on: ubuntu-latest
steps:
- uses: shogo82148/actions-goveralls@v1
diff --git a/Makefile b/Makefile
index ccf95cb902..101b9aed34 100644
--- a/Makefile
+++ b/Makefile
@@ -208,7 +208,7 @@ endif
itest-only: db-instance
@$(call print, "Running integration tests with ${backend} backend.")
rm -rf itest/*.log itest/.logs-*; date
- EXEC_SUFFIX=$(EXEC_SUFFIX) scripts/itest_part.sh 0 1 $(TEST_FLAGS) $(ITEST_FLAGS) -test.v
+ EXEC_SUFFIX=$(EXEC_SUFFIX) scripts/itest_part.sh 0 1 $(SHUFFLE_SEED) $(TEST_FLAGS) $(ITEST_FLAGS) -test.v
$(COLLECT_ITEST_COVERAGE)
#? itest: Build and run integration tests
@@ -221,7 +221,7 @@ itest-race: build-itest-race itest-only
itest-parallel: build-itest db-instance
@$(call print, "Running tests")
rm -rf itest/*.log itest/.logs-*; date
- EXEC_SUFFIX=$(EXEC_SUFFIX) scripts/itest_parallel.sh $(ITEST_PARALLELISM) $(NUM_ITEST_TRANCHES) $(TEST_FLAGS) $(ITEST_FLAGS)
+ EXEC_SUFFIX=$(EXEC_SUFFIX) scripts/itest_parallel.sh $(ITEST_PARALLELISM) $(NUM_ITEST_TRANCHES) $(SHUFFLE_SEED) $(TEST_FLAGS) $(ITEST_FLAGS)
$(COLLECT_ITEST_COVERAGE)
#? itest-clean: Kill all running itest processes
diff --git a/chainio/README.md b/chainio/README.md
new file mode 100644
index 0000000000..db2c6ba925
--- /dev/null
+++ b/chainio/README.md
@@ -0,0 +1,151 @@
+# Chainio
+
+`chainio` is a package designed to provide blockchain data access to various
+subsystems within `lnd`. When a new block is received, it is encapsulated in a
+`Blockbeat` object and disseminated to all registered consumers. Consumers may
+receive these updates either concurrently or sequentially, based on their
+registration configuration, ensuring that each subsystem maintains a
+synchronized view of the current block state.
+
+The main components include:
+
+- `Blockbeat`: An interface that provides information about the block.
+
+- `Consumer`: An interface that specifies how subsystems handle the blockbeat.
+
+- `BlockbeatDispatcher`: The core service responsible for receiving each block
+ and distributing it to all consumers.
+
+Additionally, the `BeatConsumer` struct provides a partial implementation of
+the `Consumer` interface. This struct helps reduce code duplication, allowing
+subsystems to avoid re-implementing the `ProcessBlock` and
+`NotifyBlockProcessed` methods.
+
+
+### Register a Consumer
+
+Consumers within the same queue are notified **sequentially**, while all queues
+are notified **concurrently**. A queue consists of a slice of consumers, which
+are notified in left-to-right order. Developers are responsible for determining
+dependencies in block consumption across subsystems: independent subsystems
+should be notified concurrently, whereas dependent subsystems should be
+notified sequentially.
+
+To notify the consumers concurrently, put them in different queues,
+```go
+// consumer1 and consumer2 will be notified concurrently.
+queue1 := []chainio.Consumer{consumer1}
+blockbeatDispatcher.RegisterQueue(consumer1)
+
+queue2 := []chainio.Consumer{consumer2}
+blockbeatDispatcher.RegisterQueue(consumer2)
+```
+
+To notify the consumers sequentially, put them in the same queue,
+```go
+// consumers will be notified sequentially via,
+// consumer1 -> consumer2 -> consumer3
+queue := []chainio.Consumer{
+ consumer1,
+ consumer2,
+ consumer3,
+}
+blockbeatDispatcher.RegisterQueue(queue)
+```
+
+### Implement the `Consumer` Interface
+
+Implementing the `Consumer` interface is straightforward. Below is an example
+of how
+[`sweep.TxPublisher`](https://github.com/lightningnetwork/lnd/blob/5cec466fad44c582a64cfaeb91f6d5fd302fcf85/sweep/fee_bumper.go#L310)
+implements this interface.
+
+To start, embed the partial implementation `chainio.BeatConsumer`, which
+already provides the common `ProcessBlock` `NotifyBlockProcessed` methods and
+exposes `BlockbeatChan` for the consumer to receive blockbeats.
+
+```go
+type TxPublisher struct {
+ started atomic.Bool
+ stopped atomic.Bool
+
+ chainio.BeatConsumer
+
+ ...
+```
+
+We should also remember to initialize this `BeatConsumer`,
+
+```go
+...
+// Mount the block consumer.
+tp.BeatConsumer = chainio.NewBeatConsumer(tp.quit, tp.Name())
+```
+
+Finally, in the main event loop, read from `BlockbeatChan`, process the
+received blockbeat, and, crucially, call `beat.NotifyBlockProcessed` to inform
+the blockbeat dispatcher that processing is complete.
+
+```go
+for {
+ select {
+ case beat := <-t.BlockbeatChan:
+ // Consume this blockbeat, usually it means updating the subsystem
+ // using the new block data.
+
+ // Notify we've processed the block.
+ t.NotifyBlockProcessed(beat, nil)
+
+ ...
+```
+
+### Existing Queues
+
+Currently, we have a single queue of consumers dedicated to handling force
+closures. This queue includes `ChainArbitrator`, `UtxoSweeper`, and
+`TxPublisher`, with `ChainArbitrator` managing two internal consumers:
+`chainWatcher` and `ChannelArbitrator`. The blockbeat flows sequentially
+through the chain as follows: `ChainArbitrator => chainWatcher =>
+ChannelArbitrator => UtxoSweeper => TxPublisher`. The following diagram
+illustrates the flow within the public subsystems.
+
+```mermaid
+sequenceDiagram
+ autonumber
+ participant bb as BlockBeat
+ participant cc as ChainArb
+ participant us as UtxoSweeper
+ participant tp as TxPublisher
+
+ note left of bb: 0. received block x,
dispatching...
+
+ note over bb,cc: 1. send block x to ChainArb,
wait for its done signal
+ bb->>cc: block x
+ rect rgba(165, 0, 85, 0.8)
+ critical signal processed
+ cc->>bb: processed block
+ option Process error or timeout
+ bb->>bb: error and exit
+ end
+ end
+
+ note over bb,us: 2. send block x to UtxoSweeper, wait for its done signal
+ bb->>us: block x
+ rect rgba(165, 0, 85, 0.8)
+ critical signal processed
+ us->>bb: processed block
+ option Process error or timeout
+ bb->>bb: error and exit
+ end
+ end
+
+ note over bb,tp: 3. send block x to TxPublisher, wait for its done signal
+ bb->>tp: block x
+ rect rgba(165, 0, 85, 0.8)
+ critical signal processed
+ tp->>bb: processed block
+ option Process error or timeout
+ bb->>bb: error and exit
+ end
+ end
+```
diff --git a/chainio/blockbeat.go b/chainio/blockbeat.go
new file mode 100644
index 0000000000..5df1cad777
--- /dev/null
+++ b/chainio/blockbeat.go
@@ -0,0 +1,55 @@
+package chainio
+
+import (
+ "fmt"
+
+ "github.com/btcsuite/btclog/v2"
+ "github.com/lightningnetwork/lnd/build"
+ "github.com/lightningnetwork/lnd/chainntnfs"
+)
+
+// Beat implements the Blockbeat interface. It contains the block epoch and a
+// customized logger.
+//
+// TODO(yy): extend this to check for confirmation status - which serves as the
+// single source of truth, to avoid the potential race between receiving blocks
+// and `GetTransactionDetails/RegisterSpendNtfn/RegisterConfirmationsNtfn`.
+type Beat struct {
+ // epoch is the current block epoch the blockbeat is aware of.
+ epoch chainntnfs.BlockEpoch
+
+ // log is the customized logger for the blockbeat which prints the
+ // block height.
+ log btclog.Logger
+}
+
+// Compile-time check to ensure Beat satisfies the Blockbeat interface.
+var _ Blockbeat = (*Beat)(nil)
+
+// NewBeat creates a new beat with the specified block epoch and a customized
+// logger.
+func NewBeat(epoch chainntnfs.BlockEpoch) *Beat {
+ b := &Beat{
+ epoch: epoch,
+ }
+
+ // Create a customized logger for the blockbeat.
+ logPrefix := fmt.Sprintf("Height[%6d]:", b.Height())
+ b.log = build.NewPrefixLog(logPrefix, clog)
+
+ return b
+}
+
+// Height returns the height of the block epoch.
+//
+// NOTE: Part of the Blockbeat interface.
+func (b *Beat) Height() int32 {
+ return b.epoch.Height
+}
+
+// logger returns the logger for the blockbeat.
+//
+// NOTE: Part of the private blockbeat interface.
+func (b *Beat) logger() btclog.Logger {
+ return b.log
+}
diff --git a/chainio/blockbeat_test.go b/chainio/blockbeat_test.go
new file mode 100644
index 0000000000..9326651b38
--- /dev/null
+++ b/chainio/blockbeat_test.go
@@ -0,0 +1,28 @@
+package chainio
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/lightningnetwork/lnd/chainntnfs"
+ "github.com/stretchr/testify/require"
+)
+
+var errDummy = errors.New("dummy error")
+
+// TestNewBeat tests the NewBeat and Height functions.
+func TestNewBeat(t *testing.T) {
+ t.Parallel()
+
+ // Create a testing epoch.
+ epoch := chainntnfs.BlockEpoch{
+ Height: 1,
+ }
+
+ // Create the beat and check the internal state.
+ beat := NewBeat(epoch)
+ require.Equal(t, epoch, beat.epoch)
+
+ // Check the height function.
+ require.Equal(t, epoch.Height, beat.Height())
+}
diff --git a/chainio/consumer.go b/chainio/consumer.go
new file mode 100644
index 0000000000..a9ec25745b
--- /dev/null
+++ b/chainio/consumer.go
@@ -0,0 +1,113 @@
+package chainio
+
+// BeatConsumer defines a supplementary component that should be used by
+// subsystems which implement the `Consumer` interface. It partially implements
+// the `Consumer` interface by providing the method `ProcessBlock` such that
+// subsystems don't need to re-implement it.
+//
+// While inheritance is not commonly used in Go, subsystems embedding this
+// struct cannot pass the interface check for `Consumer` because the `Name`
+// method is not implemented, which gives us a "mortise and tenon" structure.
+// In addition to reducing code duplication, this design allows `ProcessBlock`
+// to work on the concrete type `Beat` to access its internal states.
+type BeatConsumer struct {
+ // BlockbeatChan is a channel to receive blocks from Blockbeat. The
+ // received block contains the best known height and the txns confirmed
+ // in this block.
+ BlockbeatChan chan Blockbeat
+
+ // name is the name of the consumer which embeds the BlockConsumer.
+ name string
+
+ // quit is a channel that closes when the BlockConsumer is shutting
+ // down.
+ //
+ // NOTE: this quit channel should be mounted to the same quit channel
+ // used by the subsystem.
+ quit chan struct{}
+
+ // errChan is a buffered chan that receives an error returned from
+ // processing this block.
+ errChan chan error
+}
+
+// NewBeatConsumer creates a new BlockConsumer.
+func NewBeatConsumer(quit chan struct{}, name string) BeatConsumer {
+ // Refuse to start `lnd` if the quit channel is not initialized. We
+ // treat this case as if we are facing a nil pointer dereference, as
+ // there's no point to return an error here, which will cause the node
+ // to fail to be started anyway.
+ if quit == nil {
+ panic("quit channel is nil")
+ }
+
+ b := BeatConsumer{
+ BlockbeatChan: make(chan Blockbeat),
+ name: name,
+ errChan: make(chan error, 1),
+ quit: quit,
+ }
+
+ return b
+}
+
+// ProcessBlock takes a blockbeat and sends it to the consumer's blockbeat
+// channel. It will send it to the subsystem's BlockbeatChan, and block until
+// the processed result is received from the subsystem. The subsystem must call
+// `NotifyBlockProcessed` after it has finished processing the block.
+//
+// NOTE: part of the `chainio.Consumer` interface.
+func (b *BeatConsumer) ProcessBlock(beat Blockbeat) error {
+ // Update the current height.
+ beat.logger().Tracef("set current height for [%s]", b.name)
+
+ select {
+ // Send the beat to the blockbeat channel. It's expected that the
+ // consumer will read from this channel and process the block. Once
+ // processed, it should return the error or nil to the beat.Err chan.
+ case b.BlockbeatChan <- beat:
+ beat.logger().Tracef("Sent blockbeat to [%s]", b.name)
+
+ case <-b.quit:
+ beat.logger().Debugf("[%s] received shutdown before sending "+
+ "beat", b.name)
+
+ return nil
+ }
+
+ // Check the consumer's err chan. We expect the consumer to call
+ // `beat.NotifyBlockProcessed` to send the error back here.
+ select {
+ case err := <-b.errChan:
+ beat.logger().Debugf("[%s] processed beat: err=%v", b.name, err)
+
+ return err
+
+ case <-b.quit:
+ beat.logger().Debugf("[%s] received shutdown", b.name)
+ }
+
+ return nil
+}
+
+// NotifyBlockProcessed signals that the block has been processed. It takes the
+// blockbeat being processed and an error resulted from processing it. This
+// error is then sent back to the consumer's err chan to unblock
+// `ProcessBlock`.
+//
+// NOTE: This method must be called by the subsystem after it has finished
+// processing the block.
+func (b *BeatConsumer) NotifyBlockProcessed(beat Blockbeat, err error) {
+ // Update the current height.
+ beat.logger().Debugf("[%s]: notifying beat processed", b.name)
+
+ select {
+ case b.errChan <- err:
+ beat.logger().Debugf("[%s]: notified beat processed, err=%v",
+ b.name, err)
+
+ case <-b.quit:
+ beat.logger().Debugf("[%s] received shutdown before notifying "+
+ "beat processed", b.name)
+ }
+}
diff --git a/chainio/consumer_test.go b/chainio/consumer_test.go
new file mode 100644
index 0000000000..3ef79b61b4
--- /dev/null
+++ b/chainio/consumer_test.go
@@ -0,0 +1,202 @@
+package chainio
+
+import (
+ "testing"
+ "time"
+
+ "github.com/lightningnetwork/lnd/fn"
+ "github.com/stretchr/testify/require"
+)
+
+// TestNewBeatConsumer tests the NewBeatConsumer function.
+func TestNewBeatConsumer(t *testing.T) {
+ t.Parallel()
+
+ quitChan := make(chan struct{})
+ name := "test"
+
+ // Test the NewBeatConsumer function.
+ b := NewBeatConsumer(quitChan, name)
+
+ // Assert the state.
+ require.Equal(t, quitChan, b.quit)
+ require.Equal(t, name, b.name)
+ require.NotNil(t, b.BlockbeatChan)
+}
+
+// TestProcessBlockSuccess tests when the block is processed successfully, no
+// error is returned.
+func TestProcessBlockSuccess(t *testing.T) {
+ t.Parallel()
+
+ // Create a test consumer.
+ quitChan := make(chan struct{})
+ b := NewBeatConsumer(quitChan, "test")
+
+ // Create a mock beat.
+ mockBeat := &MockBlockbeat{}
+ defer mockBeat.AssertExpectations(t)
+ mockBeat.On("logger").Return(clog)
+
+ // Mock the consumer's err chan.
+ consumerErrChan := make(chan error, 1)
+ b.errChan = consumerErrChan
+
+ // Call the method under test.
+ resultChan := make(chan error, 1)
+ go func() {
+ resultChan <- b.ProcessBlock(mockBeat)
+ }()
+
+ // Assert the beat is sent to the blockbeat channel.
+ beat, err := fn.RecvOrTimeout(b.BlockbeatChan, time.Second)
+ require.NoError(t, err)
+ require.Equal(t, mockBeat, beat)
+
+ // Send nil to the consumer's error channel.
+ consumerErrChan <- nil
+
+ // Assert the result of ProcessBlock is nil.
+ result, err := fn.RecvOrTimeout(resultChan, time.Second)
+ require.NoError(t, err)
+ require.Nil(t, result)
+}
+
+// TestProcessBlockConsumerQuitBeforeSend tests when the consumer is quit
+// before sending the beat, the method returns immediately.
+func TestProcessBlockConsumerQuitBeforeSend(t *testing.T) {
+ t.Parallel()
+
+ // Create a test consumer.
+ quitChan := make(chan struct{})
+ b := NewBeatConsumer(quitChan, "test")
+
+ // Create a mock beat.
+ mockBeat := &MockBlockbeat{}
+ defer mockBeat.AssertExpectations(t)
+ mockBeat.On("logger").Return(clog)
+
+ // Call the method under test.
+ resultChan := make(chan error, 1)
+ go func() {
+ resultChan <- b.ProcessBlock(mockBeat)
+ }()
+
+ // Instead of reading the BlockbeatChan, close the quit channel.
+ close(quitChan)
+
+ // Assert ProcessBlock returned nil.
+ result, err := fn.RecvOrTimeout(resultChan, time.Second)
+ require.NoError(t, err)
+ require.Nil(t, result)
+}
+
+// TestProcessBlockConsumerQuitAfterSend tests when the consumer is quit after
+// sending the beat, the method returns immediately.
+func TestProcessBlockConsumerQuitAfterSend(t *testing.T) {
+ t.Parallel()
+
+ // Create a test consumer.
+ quitChan := make(chan struct{})
+ b := NewBeatConsumer(quitChan, "test")
+
+ // Create a mock beat.
+ mockBeat := &MockBlockbeat{}
+ defer mockBeat.AssertExpectations(t)
+ mockBeat.On("logger").Return(clog)
+
+ // Mock the consumer's err chan.
+ consumerErrChan := make(chan error, 1)
+ b.errChan = consumerErrChan
+
+ // Call the method under test.
+ resultChan := make(chan error, 1)
+ go func() {
+ resultChan <- b.ProcessBlock(mockBeat)
+ }()
+
+ // Assert the beat is sent to the blockbeat channel.
+ beat, err := fn.RecvOrTimeout(b.BlockbeatChan, time.Second)
+ require.NoError(t, err)
+ require.Equal(t, mockBeat, beat)
+
+ // Instead of sending nil to the consumer's error channel, close the
+ // quit chanel.
+ close(quitChan)
+
+ // Assert ProcessBlock returned nil.
+ result, err := fn.RecvOrTimeout(resultChan, time.Second)
+ require.NoError(t, err)
+ require.Nil(t, result)
+}
+
+// TestNotifyBlockProcessedSendErr asserts the error can be sent and read by
+// the beat via NotifyBlockProcessed.
+func TestNotifyBlockProcessedSendErr(t *testing.T) {
+ t.Parallel()
+
+ // Create a test consumer.
+ quitChan := make(chan struct{})
+ b := NewBeatConsumer(quitChan, "test")
+
+ // Create a mock beat.
+ mockBeat := &MockBlockbeat{}
+ defer mockBeat.AssertExpectations(t)
+ mockBeat.On("logger").Return(clog)
+
+ // Mock the consumer's err chan.
+ consumerErrChan := make(chan error, 1)
+ b.errChan = consumerErrChan
+
+ // Call the method under test.
+ done := make(chan error)
+ go func() {
+ defer close(done)
+ b.NotifyBlockProcessed(mockBeat, errDummy)
+ }()
+
+ // Assert the error is sent to the beat's err chan.
+ result, err := fn.RecvOrTimeout(consumerErrChan, time.Second)
+ require.NoError(t, err)
+ require.ErrorIs(t, result, errDummy)
+
+ // Assert the done channel is closed.
+ result, err = fn.RecvOrTimeout(done, time.Second)
+ require.NoError(t, err)
+ require.Nil(t, result)
+}
+
+// TestNotifyBlockProcessedOnQuit asserts NotifyBlockProcessed exits
+// immediately when the quit channel is closed.
+func TestNotifyBlockProcessedOnQuit(t *testing.T) {
+ t.Parallel()
+
+ // Create a test consumer.
+ quitChan := make(chan struct{})
+ b := NewBeatConsumer(quitChan, "test")
+
+ // Create a mock beat.
+ mockBeat := &MockBlockbeat{}
+ defer mockBeat.AssertExpectations(t)
+ mockBeat.On("logger").Return(clog)
+
+ // Mock the consumer's err chan - we don't buffer it so it will block
+ // on sending the error.
+ consumerErrChan := make(chan error)
+ b.errChan = consumerErrChan
+
+ // Call the method under test.
+ done := make(chan error)
+ go func() {
+ defer close(done)
+ b.NotifyBlockProcessed(mockBeat, errDummy)
+ }()
+
+ // Close the quit channel so the method will return.
+ close(b.quit)
+
+ // Assert the done channel is closed.
+ result, err := fn.RecvOrTimeout(done, time.Second)
+ require.NoError(t, err)
+ require.Nil(t, result)
+}
diff --git a/chainio/dispatcher.go b/chainio/dispatcher.go
new file mode 100644
index 0000000000..269bb1892c
--- /dev/null
+++ b/chainio/dispatcher.go
@@ -0,0 +1,301 @@
+package chainio
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/btcsuite/btclog/v2"
+ "github.com/lightningnetwork/lnd/chainntnfs"
+ "github.com/lightningnetwork/lnd/lnutils"
+ "golang.org/x/sync/errgroup"
+)
+
+// DefaultProcessBlockTimeout is the timeout value used when waiting for one
+// consumer to finish processing the new block epoch.
+var DefaultProcessBlockTimeout = 60 * time.Second
+
+// ErrProcessBlockTimeout is the error returned when a consumer takes too long
+// to process the block.
+var ErrProcessBlockTimeout = errors.New("process block timeout")
+
+// BlockbeatDispatcher is a service that handles dispatching new blocks to
+// `lnd`'s subsystems. During startup, subsystems that are block-driven should
+// implement the `Consumer` interface and register themselves via
+// `RegisterQueue`. When two subsystems are independent of each other, they
+// should be registered in different queues so blocks are notified concurrently.
+// Otherwise, when living in the same queue, the subsystems are notified of the
+// new blocks sequentially, which means it's critical to understand the
+// relationship of these systems to properly handle the order.
+type BlockbeatDispatcher struct {
+ wg sync.WaitGroup
+
+ // notifier is used to receive new block epochs.
+ notifier chainntnfs.ChainNotifier
+
+ // beat is the latest blockbeat received.
+ beat Blockbeat
+
+ // consumerQueues is a map of consumers that will receive blocks. Its
+ // key is a unique counter and its value is a queue of consumers. Each
+ // queue is notified concurrently, and consumers in the same queue is
+ // notified sequentially.
+ consumerQueues map[uint32][]Consumer
+
+ // counter is used to assign a unique id to each queue.
+ counter atomic.Uint32
+
+ // quit is used to signal the BlockbeatDispatcher to stop.
+ quit chan struct{}
+}
+
+// NewBlockbeatDispatcher returns a new blockbeat dispatcher instance.
+func NewBlockbeatDispatcher(n chainntnfs.ChainNotifier) *BlockbeatDispatcher {
+ return &BlockbeatDispatcher{
+ notifier: n,
+ quit: make(chan struct{}),
+ consumerQueues: make(map[uint32][]Consumer),
+ }
+}
+
+// RegisterQueue takes a list of consumers and registers them in the same
+// queue.
+//
+// NOTE: these consumers are notified sequentially.
+func (b *BlockbeatDispatcher) RegisterQueue(consumers []Consumer) {
+ qid := b.counter.Add(1)
+
+ b.consumerQueues[qid] = append(b.consumerQueues[qid], consumers...)
+ clog.Infof("Registered queue=%d with %d blockbeat consumers", qid,
+ len(consumers))
+
+ for _, c := range consumers {
+ clog.Debugf("Consumer [%s] registered in queue %d", c.Name(),
+ qid)
+ }
+}
+
+// Start starts the blockbeat dispatcher - it registers a block notification
+// and monitors and dispatches new blocks in a goroutine. It will refuse to
+// start if there are no registered consumers.
+func (b *BlockbeatDispatcher) Start() error {
+ // Make sure consumers are registered.
+ if len(b.consumerQueues) == 0 {
+ return fmt.Errorf("no consumers registered")
+ }
+
+ // Start listening to new block epochs. We should get a notification
+ // with the current best block immediately.
+ blockEpochs, err := b.notifier.RegisterBlockEpochNtfn(nil)
+ if err != nil {
+ return fmt.Errorf("register block epoch ntfn: %w", err)
+ }
+
+ clog.Infof("BlockbeatDispatcher is starting with %d consumer queues",
+ len(b.consumerQueues))
+ defer clog.Debug("BlockbeatDispatcher started")
+
+ b.wg.Add(1)
+ go b.dispatchBlocks(blockEpochs)
+
+ return nil
+}
+
+// Stop shuts down the blockbeat dispatcher.
+func (b *BlockbeatDispatcher) Stop() {
+ clog.Info("BlockbeatDispatcher is stopping")
+ defer clog.Debug("BlockbeatDispatcher stopped")
+
+ // Signal the dispatchBlocks goroutine to stop.
+ close(b.quit)
+ b.wg.Wait()
+}
+
+func (b *BlockbeatDispatcher) log() btclog.Logger {
+ return b.beat.logger()
+}
+
+// dispatchBlocks listens to new block epoch and dispatches it to all the
+// consumers. Each queue is notified concurrently, and the consumers in the
+// same queue are notified sequentially.
+//
+// NOTE: Must be run as a goroutine.
+func (b *BlockbeatDispatcher) dispatchBlocks(
+ blockEpochs *chainntnfs.BlockEpochEvent) {
+
+ defer b.wg.Done()
+ defer blockEpochs.Cancel()
+
+ for {
+ select {
+ case blockEpoch, ok := <-blockEpochs.Epochs:
+ if !ok {
+ clog.Debugf("Block epoch channel closed")
+
+ return
+ }
+
+ // Log a separator so it's easier to identify when a
+ // new block arrives for subsystems.
+ clog.Debugf("%v", lnutils.NewSeparatorClosure())
+
+ clog.Infof("Received new block %v at height %d, "+
+ "notifying consumers...", blockEpoch.Hash,
+ blockEpoch.Height)
+
+ // Record the time it takes the consumer to process
+ // this block.
+ start := time.Now()
+
+ // Update the current block epoch.
+ b.beat = NewBeat(*blockEpoch)
+
+ // Notify all consumers.
+ err := b.notifyQueues()
+ if err != nil {
+ b.log().Errorf("Notify block failed: %v", err)
+ }
+
+ b.log().Infof("Notified all consumers on new block "+
+ "in %v", time.Since(start))
+
+ case <-b.quit:
+ b.log().Debugf("BlockbeatDispatcher quit signal " +
+ "received")
+
+ return
+ }
+ }
+}
+
+// notifyQueues notifies each queue concurrently about the latest block epoch.
+func (b *BlockbeatDispatcher) notifyQueues() error {
+ // errChans is a map of channels that will be used to receive errors
+ // returned from notifying the consumers.
+ errChans := make(map[uint32]chan error, len(b.consumerQueues))
+
+ // Notify each queue in goroutines.
+ for qid, consumers := range b.consumerQueues {
+ b.log().Debugf("Notifying queue=%d with %d consumers", qid,
+ len(consumers))
+
+ // Create a signal chan.
+ errChan := make(chan error, 1)
+ errChans[qid] = errChan
+
+ // Notify each queue concurrently.
+ go func(qid uint32, c []Consumer, beat Blockbeat) {
+ // Notify each consumer in this queue sequentially.
+ errChan <- DispatchSequential(beat, c)
+ }(qid, consumers, b.beat)
+ }
+
+ // Wait for all consumers in each queue to finish.
+ for qid, errChan := range errChans {
+ select {
+ case err := <-errChan:
+ if err != nil {
+ return fmt.Errorf("queue=%d got err: %w", qid,
+ err)
+ }
+
+ b.log().Debugf("Notified queue=%d", qid)
+
+ case <-b.quit:
+ b.log().Debugf("BlockbeatDispatcher quit signal " +
+ "received, exit notifyQueues")
+
+ return nil
+ }
+ }
+
+ return nil
+}
+
+// DispatchSequential takes a list of consumers and notify them about the new
+// epoch sequentially. It requires the consumer to finish processing the block
+// within the specified time, otherwise a timeout error is returned.
+func DispatchSequential(b Blockbeat, consumers []Consumer) error {
+ for _, c := range consumers {
+ // Send the beat to the consumer.
+ err := notifyAndWait(b, c, DefaultProcessBlockTimeout)
+ if err != nil {
+ b.logger().Errorf("Failed to process block: %v", err)
+
+ return err
+ }
+ }
+
+ return nil
+}
+
+// DispatchConcurrent notifies each consumer concurrently about the blockbeat.
+// It requires the consumer to finish processing the block within the specified
+// time, otherwise a timeout error is returned.
+func DispatchConcurrent(b Blockbeat, consumers []Consumer) error {
+ eg := &errgroup.Group{}
+
+ // Notify each queue in goroutines.
+ for _, c := range consumers {
+ // Notify each consumer concurrently.
+ eg.Go(func() error {
+ // Send the beat to the consumer.
+ err := notifyAndWait(b, c, DefaultProcessBlockTimeout)
+
+ // Exit early if there's no error.
+ if err == nil {
+ return nil
+ }
+
+ b.logger().Errorf("Consumer=%v failed to process "+
+ "block: %v", c.Name(), err)
+
+ return err
+ })
+ }
+
+ // Wait for all consumers in each queue to finish.
+ if err := eg.Wait(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// notifyAndWait sends the blockbeat to the specified consumer. It requires the
+// consumer to finish processing the block within the specified time, otherwise
+// a timeout error is returned.
+func notifyAndWait(b Blockbeat, c Consumer, timeout time.Duration) error {
+ b.logger().Debugf("Waiting for consumer[%s] to process it", c.Name())
+
+ // Record the time it takes the consumer to process this block.
+ start := time.Now()
+
+ errChan := make(chan error, 1)
+ go func() {
+ errChan <- c.ProcessBlock(b)
+ }()
+
+ // We expect the consumer to finish processing this block under 30s,
+ // otherwise a timeout error is returned.
+ select {
+ case err := <-errChan:
+ if err == nil {
+ break
+ }
+
+ return fmt.Errorf("%s got err in ProcessBlock: %w", c.Name(),
+ err)
+
+ case <-time.After(timeout):
+ return fmt.Errorf("consumer %s: %w", c.Name(),
+ ErrProcessBlockTimeout)
+ }
+
+ b.logger().Debugf("Consumer[%s] processed block in %v", c.Name(),
+ time.Since(start))
+
+ return nil
+}
diff --git a/chainio/dispatcher_test.go b/chainio/dispatcher_test.go
new file mode 100644
index 0000000000..88044c0201
--- /dev/null
+++ b/chainio/dispatcher_test.go
@@ -0,0 +1,383 @@
+package chainio
+
+import (
+ "testing"
+ "time"
+
+ "github.com/lightningnetwork/lnd/chainntnfs"
+ "github.com/lightningnetwork/lnd/fn"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+)
+
+// TestNotifyAndWaitOnConsumerErr asserts when the consumer returns an error,
+// it's returned by notifyAndWait.
+func TestNotifyAndWaitOnConsumerErr(t *testing.T) {
+ t.Parallel()
+
+ // Create a mock consumer.
+ consumer := &MockConsumer{}
+ defer consumer.AssertExpectations(t)
+ consumer.On("Name").Return("mocker")
+
+ // Create a mock beat.
+ mockBeat := &MockBlockbeat{}
+ defer mockBeat.AssertExpectations(t)
+ mockBeat.On("logger").Return(clog)
+
+ // Mock ProcessBlock to return an error.
+ consumer.On("ProcessBlock", mockBeat).Return(errDummy).Once()
+
+ // Call the method under test.
+ err := notifyAndWait(mockBeat, consumer, DefaultProcessBlockTimeout)
+
+ // We expect the error to be returned.
+ require.ErrorIs(t, err, errDummy)
+}
+
+// TestNotifyAndWaitOnConsumerErr asserts when the consumer successfully
+// processed the beat, no error is returned.
+func TestNotifyAndWaitOnConsumerSuccess(t *testing.T) {
+ t.Parallel()
+
+ // Create a mock consumer.
+ consumer := &MockConsumer{}
+ defer consumer.AssertExpectations(t)
+ consumer.On("Name").Return("mocker")
+
+ // Create a mock beat.
+ mockBeat := &MockBlockbeat{}
+ defer mockBeat.AssertExpectations(t)
+ mockBeat.On("logger").Return(clog)
+
+ // Mock ProcessBlock to return nil.
+ consumer.On("ProcessBlock", mockBeat).Return(nil).Once()
+
+ // Call the method under test.
+ err := notifyAndWait(mockBeat, consumer, DefaultProcessBlockTimeout)
+
+ // We expect a nil error to be returned.
+ require.NoError(t, err)
+}
+
+// TestNotifyAndWaitOnConsumerTimeout asserts when the consumer times out
+// processing the block, the timeout error is returned.
+func TestNotifyAndWaitOnConsumerTimeout(t *testing.T) {
+ t.Parallel()
+
+ // Set timeout to be 10ms.
+ processBlockTimeout := 10 * time.Millisecond
+
+ // Create a mock consumer.
+ consumer := &MockConsumer{}
+ defer consumer.AssertExpectations(t)
+ consumer.On("Name").Return("mocker")
+
+ // Create a mock beat.
+ mockBeat := &MockBlockbeat{}
+ defer mockBeat.AssertExpectations(t)
+ mockBeat.On("logger").Return(clog)
+
+ // Mock ProcessBlock to return nil but blocks on returning.
+ consumer.On("ProcessBlock", mockBeat).Return(nil).Run(
+ func(args mock.Arguments) {
+ // Sleep one second to block on the method.
+ time.Sleep(processBlockTimeout * 100)
+ }).Once()
+
+ // Call the method under test.
+ err := notifyAndWait(mockBeat, consumer, processBlockTimeout)
+
+ // We expect a timeout error to be returned.
+ require.ErrorIs(t, err, ErrProcessBlockTimeout)
+}
+
+// TestDispatchSequential checks that the beat is sent to the consumers
+// sequentially.
+func TestDispatchSequential(t *testing.T) {
+ t.Parallel()
+
+ // Create three mock consumers.
+ consumer1 := &MockConsumer{}
+ defer consumer1.AssertExpectations(t)
+ consumer1.On("Name").Return("mocker1")
+
+ consumer2 := &MockConsumer{}
+ defer consumer2.AssertExpectations(t)
+ consumer2.On("Name").Return("mocker2")
+
+ consumer3 := &MockConsumer{}
+ defer consumer3.AssertExpectations(t)
+ consumer3.On("Name").Return("mocker3")
+
+ consumers := []Consumer{consumer1, consumer2, consumer3}
+
+ // Create a mock beat.
+ mockBeat := &MockBlockbeat{}
+ defer mockBeat.AssertExpectations(t)
+ mockBeat.On("logger").Return(clog)
+
+ // prevConsumer specifies the previous consumer that was called.
+ var prevConsumer string
+
+ // Mock the ProcessBlock on consumers to reutrn immediately.
+ consumer1.On("ProcessBlock", mockBeat).Return(nil).Run(
+ func(args mock.Arguments) {
+ // Check the order of the consumers.
+ //
+ // The first consumer should have no previous consumer.
+ require.Empty(t, prevConsumer)
+
+ // Set the consumer as the previous consumer.
+ prevConsumer = consumer1.Name()
+ }).Once()
+
+ consumer2.On("ProcessBlock", mockBeat).Return(nil).Run(
+ func(args mock.Arguments) {
+ // Check the order of the consumers.
+ //
+ // The second consumer should see consumer1.
+ require.Equal(t, consumer1.Name(), prevConsumer)
+
+ // Set the consumer as the previous consumer.
+ prevConsumer = consumer2.Name()
+ }).Once()
+
+ consumer3.On("ProcessBlock", mockBeat).Return(nil).Run(
+ func(args mock.Arguments) {
+ // Check the order of the consumers.
+ //
+ // The third consumer should see consumer2.
+ require.Equal(t, consumer2.Name(), prevConsumer)
+
+ // Set the consumer as the previous consumer.
+ prevConsumer = consumer3.Name()
+ }).Once()
+
+ // Call the method under test.
+ err := DispatchSequential(mockBeat, consumers)
+ require.NoError(t, err)
+
+ // Check the previous consumer is the last consumer.
+ require.Equal(t, consumer3.Name(), prevConsumer)
+}
+
+// TestRegisterQueue tests the RegisterQueue function.
+func TestRegisterQueue(t *testing.T) {
+ t.Parallel()
+
+ // Create two mock consumers.
+ consumer1 := &MockConsumer{}
+ defer consumer1.AssertExpectations(t)
+ consumer1.On("Name").Return("mocker1")
+
+ consumer2 := &MockConsumer{}
+ defer consumer2.AssertExpectations(t)
+ consumer2.On("Name").Return("mocker2")
+
+ consumers := []Consumer{consumer1, consumer2}
+
+ // Create a mock chain notifier.
+ mockNotifier := &chainntnfs.MockChainNotifier{}
+ defer mockNotifier.AssertExpectations(t)
+
+ // Create a new dispatcher.
+ b := NewBlockbeatDispatcher(mockNotifier)
+
+ // Register the consumers.
+ b.RegisterQueue(consumers)
+
+ // Assert that the consumers have been registered.
+ //
+ // We should have one queue.
+ require.Len(t, b.consumerQueues, 1)
+
+ // The queue should have two consumers.
+ queue, ok := b.consumerQueues[1]
+ require.True(t, ok)
+ require.Len(t, queue, 2)
+}
+
+// TestStartDispatcher tests the Start method.
+func TestStartDispatcher(t *testing.T) {
+ t.Parallel()
+
+ // Create a mock chain notifier.
+ mockNotifier := &chainntnfs.MockChainNotifier{}
+ defer mockNotifier.AssertExpectations(t)
+
+ // Create a new dispatcher.
+ b := NewBlockbeatDispatcher(mockNotifier)
+
+ // Start the dispatcher without consumers should return an error.
+ err := b.Start()
+ require.Error(t, err)
+
+ // Create a consumer and register it.
+ consumer := &MockConsumer{}
+ defer consumer.AssertExpectations(t)
+ consumer.On("Name").Return("mocker1")
+ b.RegisterQueue([]Consumer{consumer})
+
+ // Mock the chain notifier to return an error.
+ mockNotifier.On("RegisterBlockEpochNtfn",
+ mock.Anything).Return(nil, errDummy).Once()
+
+ // Start the dispatcher now should return the error.
+ err = b.Start()
+ require.ErrorIs(t, err, errDummy)
+
+ // Mock the chain notifier to return a valid notifier.
+ blockEpochs := &chainntnfs.BlockEpochEvent{}
+ mockNotifier.On("RegisterBlockEpochNtfn",
+ mock.Anything).Return(blockEpochs, nil).Once()
+
+ // Start the dispatcher now should not return an error.
+ err = b.Start()
+ require.NoError(t, err)
+}
+
+// TestDispatchBlocks asserts the blocks are properly dispatched to the queues.
+func TestDispatchBlocks(t *testing.T) {
+ t.Parallel()
+
+ // Create a mock chain notifier.
+ mockNotifier := &chainntnfs.MockChainNotifier{}
+ defer mockNotifier.AssertExpectations(t)
+
+ // Create a new dispatcher.
+ b := NewBlockbeatDispatcher(mockNotifier)
+
+ // Create the beat and attach it to the dispatcher.
+ epoch := chainntnfs.BlockEpoch{Height: 1}
+ beat := NewBeat(epoch)
+ b.beat = beat
+
+ // Create a consumer and register it.
+ consumer := &MockConsumer{}
+ defer consumer.AssertExpectations(t)
+ consumer.On("Name").Return("mocker1")
+ b.RegisterQueue([]Consumer{consumer})
+
+ // Mock the consumer to return nil error on ProcessBlock. This
+ // implictly asserts that the step `notifyQueues` is successfully
+ // reached in the `dispatchBlocks` method.
+ consumer.On("ProcessBlock", mock.Anything).Return(nil).Once()
+
+ // Create a test epoch chan.
+ epochChan := make(chan *chainntnfs.BlockEpoch, 1)
+ blockEpochs := &chainntnfs.BlockEpochEvent{
+ Epochs: epochChan,
+ Cancel: func() {},
+ }
+
+ // Call the method in a goroutine.
+ done := make(chan struct{})
+ b.wg.Add(1)
+ go func() {
+ defer close(done)
+ b.dispatchBlocks(blockEpochs)
+ }()
+
+ // Send an epoch.
+ epoch = chainntnfs.BlockEpoch{Height: 2}
+ epochChan <- &epoch
+
+ // Wait for the dispatcher to process the epoch.
+ time.Sleep(100 * time.Millisecond)
+
+ // Stop the dispatcher.
+ b.Stop()
+
+ // We expect the dispatcher to stop immediately.
+ _, err := fn.RecvOrTimeout(done, time.Second)
+ require.NoError(t, err)
+}
+
+// TestNotifyQueuesSuccess checks when the dispatcher successfully notifies all
+// the queues, no error is returned.
+func TestNotifyQueuesSuccess(t *testing.T) {
+ t.Parallel()
+
+ // Create two mock consumers.
+ consumer1 := &MockConsumer{}
+ defer consumer1.AssertExpectations(t)
+ consumer1.On("Name").Return("mocker1")
+
+ consumer2 := &MockConsumer{}
+ defer consumer2.AssertExpectations(t)
+ consumer2.On("Name").Return("mocker2")
+
+ // Create two queues.
+ queue1 := []Consumer{consumer1}
+ queue2 := []Consumer{consumer2}
+
+ // Create a mock chain notifier.
+ mockNotifier := &chainntnfs.MockChainNotifier{}
+ defer mockNotifier.AssertExpectations(t)
+
+ // Create a mock beat.
+ mockBeat := &MockBlockbeat{}
+ defer mockBeat.AssertExpectations(t)
+ mockBeat.On("logger").Return(clog)
+
+ // Create a new dispatcher.
+ b := NewBlockbeatDispatcher(mockNotifier)
+
+ // Register the queues.
+ b.RegisterQueue(queue1)
+ b.RegisterQueue(queue2)
+
+ // Attach the blockbeat.
+ b.beat = mockBeat
+
+ // Mock the consumers to return nil error on ProcessBlock for
+ // both calls.
+ consumer1.On("ProcessBlock", mockBeat).Return(nil).Once()
+ consumer2.On("ProcessBlock", mockBeat).Return(nil).Once()
+
+ // Notify the queues. The mockers will be asserted in the end to
+ // validate the calls.
+ err := b.notifyQueues()
+ require.NoError(t, err)
+}
+
+// TestNotifyQueuesError checks when one of the queue returns an error, this
+// error is returned by the method.
+func TestNotifyQueuesError(t *testing.T) {
+ t.Parallel()
+
+ // Create a mock consumer.
+ consumer := &MockConsumer{}
+ defer consumer.AssertExpectations(t)
+ consumer.On("Name").Return("mocker1")
+
+ // Create one queue.
+ queue := []Consumer{consumer}
+
+ // Create a mock chain notifier.
+ mockNotifier := &chainntnfs.MockChainNotifier{}
+ defer mockNotifier.AssertExpectations(t)
+
+ // Create a mock beat.
+ mockBeat := &MockBlockbeat{}
+ defer mockBeat.AssertExpectations(t)
+ mockBeat.On("logger").Return(clog)
+
+ // Create a new dispatcher.
+ b := NewBlockbeatDispatcher(mockNotifier)
+
+ // Register the queues.
+ b.RegisterQueue(queue)
+
+ // Attach the blockbeat.
+ b.beat = mockBeat
+
+ // Mock the consumer to return an error on ProcessBlock.
+ consumer.On("ProcessBlock", mockBeat).Return(errDummy).Once()
+
+ // Notify the queues. The mockers will be asserted in the end to
+ // validate the calls.
+ err := b.notifyQueues()
+ require.ErrorIs(t, err, errDummy)
+}
diff --git a/chainio/interface.go b/chainio/interface.go
new file mode 100644
index 0000000000..03c09faf7c
--- /dev/null
+++ b/chainio/interface.go
@@ -0,0 +1,53 @@
+package chainio
+
+import "github.com/btcsuite/btclog/v2"
+
+// Blockbeat defines an interface that can be used by subsystems to retrieve
+// block data. It is sent by the BlockbeatDispatcher to all the registered
+// consumers whenever a new block is received. Once the consumer finishes
+// processing the block, it must signal it by calling `NotifyBlockProcessed`.
+//
+// The blockchain is a state machine - whenever there's a state change, it's
+// manifested in a block. The blockbeat is a way to notify subsystems of this
+// state change, and to provide them with the data they need to process it. In
+// other words, subsystems must react to this state change and should consider
+// being driven by the blockbeat in their own state machines.
+type Blockbeat interface {
+ // blockbeat is a private interface that's only used in this package.
+ blockbeat
+
+ // Height returns the current block height.
+ Height() int32
+}
+
+// blockbeat defines a set of private methods used in this package to make
+// interaction with the blockbeat easier.
+type blockbeat interface {
+ // logger returns the internal logger used by the blockbeat which has a
+ // block height prefix.
+ logger() btclog.Logger
+}
+
+// Consumer defines a blockbeat consumer interface. Subsystems that need block
+// info must implement it.
+type Consumer interface {
+ // TODO(yy): We should also define the start methods used by the
+ // consumers such that when implementing the interface, the consumer
+ // will always be started with a blockbeat. This cannot be enforced at
+ // the moment as we need refactor all the start methods to only take a
+ // beat.
+ //
+ // Start(beat Blockbeat) error
+
+ // Name returns a human-readable string for this subsystem.
+ Name() string
+
+ // ProcessBlock takes a blockbeat and processes it. It should not
+ // return until the subsystem has updated its state based on the block
+ // data.
+ //
+ // NOTE: The consumer must try its best to NOT return an error. If an
+ // error is returned from processing the block, it means the subsystem
+ // cannot react to onchain state changes and lnd will shutdown.
+ ProcessBlock(b Blockbeat) error
+}
diff --git a/chainio/log.go b/chainio/log.go
new file mode 100644
index 0000000000..2d8c26f7a5
--- /dev/null
+++ b/chainio/log.go
@@ -0,0 +1,32 @@
+package chainio
+
+import (
+ "github.com/btcsuite/btclog/v2"
+ "github.com/lightningnetwork/lnd/build"
+)
+
+// Subsystem defines the logging code for this subsystem.
+const Subsystem = "CHIO"
+
+// clog is a logger that is initialized with no output filters. This means the
+// package will not perform any logging by default until the caller requests
+// it.
+var clog btclog.Logger
+
+// The default amount of logging is none.
+func init() {
+ UseLogger(build.NewSubLogger(Subsystem, nil))
+}
+
+// DisableLog disables all library log output. Logging output is disabled by
+// default until UseLogger is called.
+func DisableLog() {
+ UseLogger(btclog.Disabled)
+}
+
+// UseLogger uses a specified Logger to output package logging info. This
+// should be used in preference to SetLogWriter if the caller is also using
+// btclog.
+func UseLogger(logger btclog.Logger) {
+ clog = logger
+}
diff --git a/chainio/mocks.go b/chainio/mocks.go
new file mode 100644
index 0000000000..5677734e1d
--- /dev/null
+++ b/chainio/mocks.go
@@ -0,0 +1,50 @@
+package chainio
+
+import (
+ "github.com/btcsuite/btclog/v2"
+ "github.com/stretchr/testify/mock"
+)
+
+// MockConsumer is a mock implementation of the Consumer interface.
+type MockConsumer struct {
+ mock.Mock
+}
+
+// Compile-time constraint to ensure MockConsumer implements Consumer.
+var _ Consumer = (*MockConsumer)(nil)
+
+// Name returns a human-readable string for this subsystem.
+func (m *MockConsumer) Name() string {
+ args := m.Called()
+ return args.String(0)
+}
+
+// ProcessBlock takes a blockbeat and processes it. A receive-only error chan
+// must be returned.
+func (m *MockConsumer) ProcessBlock(b Blockbeat) error {
+ args := m.Called(b)
+
+ return args.Error(0)
+}
+
+// MockBlockbeat is a mock implementation of the Blockbeat interface.
+type MockBlockbeat struct {
+ mock.Mock
+}
+
+// Compile-time constraint to ensure MockBlockbeat implements Blockbeat.
+var _ Blockbeat = (*MockBlockbeat)(nil)
+
+// Height returns the current block height.
+func (m *MockBlockbeat) Height() int32 {
+ args := m.Called()
+
+ return args.Get(0).(int32)
+}
+
+// logger returns the logger for the blockbeat.
+func (m *MockBlockbeat) logger() btclog.Logger {
+ args := m.Called()
+
+ return args.Get(0).(btclog.Logger)
+}
diff --git a/chainntnfs/bitcoindnotify/bitcoind.go b/chainntnfs/bitcoindnotify/bitcoind.go
index 8bcf8872b1..91c38d8d6e 100644
--- a/chainntnfs/bitcoindnotify/bitcoind.go
+++ b/chainntnfs/bitcoindnotify/bitcoind.go
@@ -665,8 +665,14 @@ func (b *BitcoindNotifier) handleBlockConnected(block chainntnfs.BlockEpoch) err
// satisfy any client requests based upon the new block.
b.bestBlock = block
+ err = b.txNotifier.NotifyHeight(uint32(block.Height))
+ if err != nil {
+ return fmt.Errorf("unable to notify height: %w", err)
+ }
+
b.notifyBlockEpochs(block.Height, block.Hash, block.BlockHeader)
- return b.txNotifier.NotifyHeight(uint32(block.Height))
+
+ return nil
}
// notifyBlockEpochs notifies all registered block epoch clients of the newly
diff --git a/chainntnfs/btcdnotify/btcd.go b/chainntnfs/btcdnotify/btcd.go
index bcbfa571a5..ff91b5aee6 100644
--- a/chainntnfs/btcdnotify/btcd.go
+++ b/chainntnfs/btcdnotify/btcd.go
@@ -725,11 +725,16 @@ func (b *BtcdNotifier) handleBlockConnected(epoch chainntnfs.BlockEpoch) error {
// satisfy any client requests based upon the new block.
b.bestBlock = epoch
+ err = b.txNotifier.NotifyHeight(uint32(epoch.Height))
+ if err != nil {
+ return fmt.Errorf("unable to notify height: %w", err)
+ }
+
b.notifyBlockEpochs(
epoch.Height, epoch.Hash, epoch.BlockHeader,
)
- return b.txNotifier.NotifyHeight(uint32(epoch.Height))
+ return nil
}
// notifyBlockEpochs notifies all registered block epoch clients of the newly
diff --git a/chainntnfs/interface.go b/chainntnfs/interface.go
index 3337f1451a..b2383636aa 100644
--- a/chainntnfs/interface.go
+++ b/chainntnfs/interface.go
@@ -258,6 +258,9 @@ type ConfirmationEvent struct {
// channels.
func NewConfirmationEvent(numConfs uint32, cancel func()) *ConfirmationEvent {
return &ConfirmationEvent{
+ // We cannot rely on the subscriber to immediately read from
+ // the channel so we need to create a larger buffer to avoid
+ // blocking the notifier.
Confirmed: make(chan *TxConfirmation, 1),
Updates: make(chan uint32, numConfs),
NegativeConf: make(chan int32, 1),
diff --git a/chainntnfs/neutrinonotify/neutrino.go b/chainntnfs/neutrinonotify/neutrino.go
index 55d9235779..7b93bcd80d 100644
--- a/chainntnfs/neutrinonotify/neutrino.go
+++ b/chainntnfs/neutrinonotify/neutrino.go
@@ -689,10 +689,16 @@ func (n *NeutrinoNotifier) handleBlockConnected(newBlock *filteredBlock) error {
n.bestBlock.Height = int32(newBlock.height)
n.bestBlock.BlockHeader = newBlock.header
+ err = n.txNotifier.NotifyHeight(newBlock.height)
+ if err != nil {
+ return fmt.Errorf("unable to notify height: %w", err)
+ }
+
n.notifyBlockEpochs(
int32(newBlock.height), &newBlock.hash, newBlock.header,
)
- return n.txNotifier.NotifyHeight(newBlock.height)
+
+ return nil
}
// getFilteredBlock is a utility to retrieve the full filtered block from a block epoch.
diff --git a/chainntnfs/txnotifier.go b/chainntnfs/txnotifier.go
index be471da3c7..71b9c929df 100644
--- a/chainntnfs/txnotifier.go
+++ b/chainntnfs/txnotifier.go
@@ -244,20 +244,25 @@ type ConfNtfn struct {
// notification is to be sent.
NumConfirmations uint32
- // Event contains references to the channels that the notifications are to
- // be sent over.
+ // Event contains references to the channels that the notifications are
+ // to be sent over.
Event *ConfirmationEvent
// HeightHint is the minimum height in the chain that we expect to find
// this txid.
HeightHint uint32
- // dispatched is false if the confirmed notification has not been sent yet.
+ // dispatched is false if the confirmed notification has not been sent
+ // yet.
dispatched bool
// includeBlock is true if the dispatched notification should also have
// the block included with it.
includeBlock bool
+
+ // numConfsLeft is the number of confirmations left to be sent to the
+ // subscriber.
+ numConfsLeft uint32
}
// HistoricalConfDispatch parametrizes a manual rescan for a particular
@@ -589,6 +594,7 @@ func (n *TxNotifier) newConfNtfn(txid *chainhash.Hash,
}),
HeightHint: heightHint,
includeBlock: opts.includeBlock,
+ numConfsLeft: numConfs,
}, nil
}
@@ -664,8 +670,8 @@ func (n *TxNotifier) RegisterConf(txid *chainhash.Hash, pkScript []byte,
// already been found, we'll attempt to deliver them immediately
// to this client.
Log.Debugf("Attempting to dispatch confirmation for %v on "+
- "registration since rescan has finished",
- ntfn.ConfRequest)
+ "registration since rescan has finished, conf_id=%v",
+ ntfn.ConfRequest, ntfn.ConfID)
// The default notification we assigned above includes the
// block along with the rest of the details. However not all
@@ -679,9 +685,13 @@ func (n *TxNotifier) RegisterConf(txid *chainhash.Hash, pkScript []byte,
confDetails = &confDetailsCopy
}
- err := n.dispatchConfDetails(ntfn, confDetails)
- if err != nil {
- return nil, err
+ // Deliver the details to the whole conf set where this ntfn
+ // lives in.
+ for _, subscriber := range confSet.ntfns {
+ err := n.dispatchConfDetails(subscriber, confDetails)
+ if err != nil {
+ return nil, err
+ }
}
return &ConfRegistration{
@@ -912,10 +922,16 @@ func (n *TxNotifier) dispatchConfDetails(
// If there are no conf details to dispatch or if the notification has
// already been dispatched, then we can skip dispatching to this
// client.
- if details == nil || ntfn.dispatched {
- Log.Debugf("Skipping dispatch of conf details(%v) for "+
- "request %v, dispatched=%v", details, ntfn.ConfRequest,
- ntfn.dispatched)
+ if details == nil {
+ Log.Debugf("Skipped dispatching nil conf details for request "+
+ "%v, conf_id=%v", ntfn.ConfRequest, ntfn.ConfID)
+
+ return nil
+ }
+
+ if ntfn.dispatched {
+ Log.Debugf("Skipped dispatched conf details for request %v "+
+ "conf_id=%v", ntfn.ConfRequest, ntfn.ConfID)
return nil
}
@@ -925,16 +941,16 @@ func (n *TxNotifier) dispatchConfDetails(
// we'll dispatch a confirmation notification to the caller.
confHeight := details.BlockHeight + ntfn.NumConfirmations - 1
if confHeight <= n.currentHeight {
- Log.Debugf("Dispatching %v confirmation notification for %v",
- ntfn.NumConfirmations, ntfn.ConfRequest)
+ Log.Debugf("Dispatching %v confirmation notification for "+
+ "conf_id=%v, %v", ntfn.NumConfirmations, ntfn.ConfID,
+ ntfn.ConfRequest)
// We'll send a 0 value to the Updates channel,
// indicating that the transaction/output script has already
// been confirmed.
- select {
- case ntfn.Event.Updates <- 0:
- case <-n.quit:
- return ErrTxNotifierExiting
+ err := n.notifyNumConfsLeft(ntfn, 0)
+ if err != nil {
+ return err
}
select {
@@ -944,8 +960,8 @@ func (n *TxNotifier) dispatchConfDetails(
return ErrTxNotifierExiting
}
} else {
- Log.Debugf("Queueing %v confirmation notification for %v at tip ",
- ntfn.NumConfirmations, ntfn.ConfRequest)
+ Log.Debugf("Queueing %v confirmation notification for %v at "+
+ "tip", ntfn.NumConfirmations, ntfn.ConfRequest)
// Otherwise, we'll keep track of the notification
// request by the height at which we should dispatch the
@@ -961,10 +977,9 @@ func (n *TxNotifier) dispatchConfDetails(
// confirmations are left for the transaction/output script to
// be confirmed.
numConfsLeft := confHeight - n.currentHeight
- select {
- case ntfn.Event.Updates <- numConfsLeft:
- case <-n.quit:
- return ErrTxNotifierExiting
+ err := n.notifyNumConfsLeft(ntfn, numConfsLeft)
+ if err != nil {
+ return err
}
}
@@ -1729,10 +1744,9 @@ func (n *TxNotifier) NotifyHeight(height uint32) error {
continue
}
- select {
- case ntfn.Event.Updates <- numConfsLeft:
- case <-n.quit:
- return ErrTxNotifierExiting
+ err := n.notifyNumConfsLeft(ntfn, numConfsLeft)
+ if err != nil {
+ return err
}
}
}
@@ -1743,8 +1757,9 @@ func (n *TxNotifier) NotifyHeight(height uint32) error {
for ntfn := range n.ntfnsByConfirmHeight[height] {
confSet := n.confNotifications[ntfn.ConfRequest]
- Log.Debugf("Dispatching %v confirmation notification for %v",
- ntfn.NumConfirmations, ntfn.ConfRequest)
+ Log.Debugf("Dispatching %v confirmation notification for "+
+ "conf_id=%v, %v", ntfn.NumConfirmations, ntfn.ConfID,
+ ntfn.ConfRequest)
// The default notification we assigned above includes the
// block along with the rest of the details. However not all
@@ -1833,6 +1848,9 @@ func (n *TxNotifier) DisconnectTip(blockHeight uint32) error {
default:
}
+ // We also reset the num of confs update.
+ ntfn.numConfsLeft = ntfn.NumConfirmations
+
// Then, we'll check if the current
// transaction/output script was included in the
// block currently being disconnected. If it
@@ -2069,3 +2087,30 @@ func (n *TxNotifier) TearDown() {
}
}
}
+
+// notifyNumConfsLeft sends the number of confirmations left to the
+// notification subscriber through the Event.Updates channel.
+//
+// NOTE: must be used with the TxNotifier's lock held.
+func (n *TxNotifier) notifyNumConfsLeft(ntfn *ConfNtfn, num uint32) error {
+ // If the number left is no less than the recorded value, we can skip
+ // sending it as it means this same value has already been sent before.
+ if num >= ntfn.numConfsLeft {
+ Log.Debugf("Skipped dispatched update (numConfsLeft=%v) for "+
+ "request %v conf_id=%v", num, ntfn.ConfRequest,
+ ntfn.ConfID)
+
+ return nil
+ }
+
+ // Update the number of confirmations left to the notification.
+ ntfn.numConfsLeft = num
+
+ select {
+ case ntfn.Event.Updates <- num:
+ case <-n.quit:
+ return ErrTxNotifierExiting
+ }
+
+ return nil
+}
diff --git a/chanrestore.go b/chanrestore.go
index 5b221c105a..a041f571a8 100644
--- a/chanrestore.go
+++ b/chanrestore.go
@@ -8,6 +8,7 @@ import (
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
+ "github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/contractcourt"
@@ -286,6 +287,9 @@ func (c *chanDBRestorer) RestoreChansFromSingles(backups ...chanbackup.Single) e
ltndLog.Infof("Informing chain watchers of new restored channels")
+ // Create a slice of channel points.
+ chanPoints := make([]wire.OutPoint, 0, len(channelShells))
+
// Finally, we'll need to inform the chain arbitrator of these new
// channels so we'll properly watch for their ultimate closure on chain
// and sweep them via the DLP.
@@ -294,8 +298,15 @@ func (c *chanDBRestorer) RestoreChansFromSingles(backups ...chanbackup.Single) e
if err != nil {
return err
}
+
+ chanPoints = append(
+ chanPoints, restoredChannel.Chan.FundingOutpoint,
+ )
}
+ // With all the channels restored, we'll now re-send the blockbeat.
+ c.chainArb.RedispatchBlockbeat(chanPoints)
+
return nil
}
@@ -314,7 +325,7 @@ func (s *server) ConnectPeer(nodePub *btcec.PublicKey, addrs []net.Addr) error {
// to ensure the new connection is created after this new link/channel
// is known.
if err := s.DisconnectPeer(nodePub); err != nil {
- ltndLog.Infof("Peer(%v) is already connected, proceeding "+
+ ltndLog.Infof("Peer(%x) is already connected, proceeding "+
"with chan restore", nodePub.SerializeCompressed())
}
diff --git a/config.go b/config.go
index 9b9e9573e0..04a4917658 100644
--- a/config.go
+++ b/config.go
@@ -562,8 +562,6 @@ func DefaultConfig() Config {
LetsEncryptDir: defaultLetsEncryptDir,
LetsEncryptListen: defaultLetsEncryptListen,
LogDir: defaultLogDir,
- MaxLogFiles: build.DefaultMaxLogFiles,
- MaxLogFileSize: build.DefaultMaxLogFileSize,
AcceptorTimeout: defaultAcceptorTimeout,
WSPingInterval: lnrpc.DefaultPingInterval,
WSPongWait: lnrpc.DefaultPongWait,
@@ -862,6 +860,18 @@ func LoadConfig(interceptor signal.Interceptor) (*Config, error) {
func ValidateConfig(cfg Config, interceptor signal.Interceptor, fileParser,
flagParser *flags.Parser) (*Config, error) {
+ // Special show command to list supported subsystems and exit.
+ if cfg.DebugLevel == "show" {
+ subLogMgr := build.NewSubLoggerManager()
+
+ // Initialize logging at the default logging level.
+ SetupLoggers(subLogMgr, interceptor)
+
+ fmt.Println("Supported subsystems",
+ subLogMgr.SupportedSubsystems())
+ os.Exit(0)
+ }
+
// If the provided lnd directory is not the default, we'll modify the
// path to all of the files and directories that will live within it.
lndDir := CleanAndExpandPath(cfg.LndDir)
@@ -1251,7 +1261,7 @@ func ValidateConfig(cfg Config, interceptor signal.Interceptor, fileParser,
// The target network must be provided, otherwise, we won't
// know how to initialize the daemon.
if numNets == 0 {
- str := "either --bitcoin.mainnet, or bitcoin.testnet," +
+ str := "either --bitcoin.mainnet, or bitcoin.testnet, " +
"bitcoin.simnet, bitcoin.regtest or bitcoin.signet " +
"must be specified"
@@ -1410,14 +1420,7 @@ func ValidateConfig(cfg Config, interceptor signal.Interceptor, fileParser,
// Initialize logging at the default logging level.
SetupLoggers(cfg.SubLogMgr, interceptor)
- // Special show command to list supported subsystems and exit.
- if cfg.DebugLevel == "show" {
- fmt.Println("Supported subsystems",
- cfg.SubLogMgr.SupportedSubsystems())
- os.Exit(0)
- }
-
- if cfg.MaxLogFiles != build.DefaultMaxLogFiles {
+ if cfg.MaxLogFiles != 0 {
if cfg.LogConfig.File.MaxLogFiles !=
build.DefaultMaxLogFiles {
@@ -1427,7 +1430,7 @@ func ValidateConfig(cfg Config, interceptor signal.Interceptor, fileParser,
cfg.LogConfig.File.MaxLogFiles = cfg.MaxLogFiles
}
- if cfg.MaxLogFileSize != build.DefaultMaxLogFileSize {
+ if cfg.MaxLogFileSize != 0 {
if cfg.LogConfig.File.MaxLogFileSize !=
build.DefaultMaxLogFileSize {
diff --git a/contractcourt/anchor_resolver.go b/contractcourt/anchor_resolver.go
index b4d6877202..56d86aebe2 100644
--- a/contractcourt/anchor_resolver.go
+++ b/contractcourt/anchor_resolver.go
@@ -2,6 +2,7 @@ package contractcourt
import (
"errors"
+ "fmt"
"io"
"sync"
@@ -23,9 +24,6 @@ type anchorResolver struct {
// anchor is the outpoint on the commitment transaction.
anchor wire.OutPoint
- // resolved reflects if the contract has been fully resolved or not.
- resolved bool
-
// broadcastHeight is the height that the original contract was
// broadcast to the main-chain at. We'll use this value to bound any
// historical queries to the chain for spends/confirmations.
@@ -71,7 +69,7 @@ func newAnchorResolver(anchorSignDescriptor input.SignDescriptor,
currentReport: report,
}
- r.initLogger(r)
+ r.initLogger(fmt.Sprintf("%T(%v)", r, r.anchor))
return r
}
@@ -83,49 +81,12 @@ func (c *anchorResolver) ResolverKey() []byte {
return nil
}
-// Resolve offers the anchor output to the sweeper and waits for it to be swept.
-func (c *anchorResolver) Resolve(_ bool) (ContractResolver, error) {
- // Attempt to update the sweep parameters to the post-confirmation
- // situation. We don't want to force sweep anymore, because the anchor
- // lost its special purpose to get the commitment confirmed. It is just
- // an output that we want to sweep only if it is economical to do so.
- //
- // An exclusive group is not necessary anymore, because we know that
- // this is the only anchor that can be swept.
- //
- // We also clear the parent tx information for cpfp, because the
- // commitment tx is confirmed.
- //
- // After a restart or when the remote force closes, the sweeper is not
- // yet aware of the anchor. In that case, it will be added as new input
- // to the sweeper.
- witnessType := input.CommitmentAnchor
-
- // For taproot channels, we need to use the proper witness type.
- if c.chanType.IsTaproot() {
- witnessType = input.TaprootAnchorSweepSpend
- }
-
- anchorInput := input.MakeBaseInput(
- &c.anchor, witnessType, &c.anchorSignDescriptor,
- c.broadcastHeight, nil,
- )
-
- resultChan, err := c.Sweeper.SweepInput(
- &anchorInput,
- sweep.Params{
- // For normal anchor sweeping, the budget is 330 sats.
- Budget: btcutil.Amount(
- anchorInput.SignDesc().Output.Value,
- ),
-
- // There's no rush to sweep the anchor, so we use a nil
- // deadline here.
- DeadlineHeight: fn.None[int32](),
- },
- )
- if err != nil {
- return nil, err
+// Resolve waits for the output to be swept.
+func (c *anchorResolver) Resolve() (ContractResolver, error) {
+ // If we're already resolved, then we can exit early.
+ if c.IsResolved() {
+ c.log.Errorf("already resolved")
+ return nil, nil
}
var (
@@ -134,7 +95,7 @@ func (c *anchorResolver) Resolve(_ bool) (ContractResolver, error) {
)
select {
- case sweepRes := <-resultChan:
+ case sweepRes := <-c.sweepResultChan:
switch sweepRes.Err {
// Anchor was swept successfully.
case nil:
@@ -160,6 +121,8 @@ func (c *anchorResolver) Resolve(_ bool) (ContractResolver, error) {
return nil, errResolverShuttingDown
}
+ c.log.Infof("resolved in tx %v", spendTx)
+
// Update report to reflect that funds are no longer in limbo.
c.reportLock.Lock()
if outcome == channeldb.ResolverOutcomeClaimed {
@@ -171,7 +134,7 @@ func (c *anchorResolver) Resolve(_ bool) (ContractResolver, error) {
)
c.reportLock.Unlock()
- c.resolved = true
+ c.resolved.Store(true)
return nil, c.PutResolverReport(nil, report)
}
@@ -180,15 +143,10 @@ func (c *anchorResolver) Resolve(_ bool) (ContractResolver, error) {
//
// NOTE: Part of the ContractResolver interface.
func (c *anchorResolver) Stop() {
- close(c.quit)
-}
+ c.log.Debugf("stopping...")
+ defer c.log.Debugf("stopped")
-// IsResolved returns true if the stored state in the resolve is fully
-// resolved. In this case the target output can be forgotten.
-//
-// NOTE: Part of the ContractResolver interface.
-func (c *anchorResolver) IsResolved() bool {
- return c.resolved
+ close(c.quit)
}
// SupplementState allows the user of a ContractResolver to supplement it with
@@ -215,3 +173,68 @@ func (c *anchorResolver) Encode(w io.Writer) error {
// A compile time assertion to ensure anchorResolver meets the
// ContractResolver interface.
var _ ContractResolver = (*anchorResolver)(nil)
+
+// Launch offers the anchor output to the sweeper.
+func (c *anchorResolver) Launch() error {
+ if c.launched.Load() {
+ c.log.Tracef("already launched")
+ return nil
+ }
+
+ c.log.Debugf("launching resolver...")
+ c.launched.Store(true)
+
+ // If we're already resolved, then we can exit early.
+ if c.IsResolved() {
+ c.log.Errorf("already resolved")
+ return nil
+ }
+
+ // Attempt to update the sweep parameters to the post-confirmation
+ // situation. We don't want to force sweep anymore, because the anchor
+ // lost its special purpose to get the commitment confirmed. It is just
+ // an output that we want to sweep only if it is economical to do so.
+ //
+ // An exclusive group is not necessary anymore, because we know that
+ // this is the only anchor that can be swept.
+ //
+ // We also clear the parent tx information for cpfp, because the
+ // commitment tx is confirmed.
+ //
+ // After a restart or when the remote force closes, the sweeper is not
+ // yet aware of the anchor. In that case, it will be added as new input
+ // to the sweeper.
+ witnessType := input.CommitmentAnchor
+
+ // For taproot channels, we need to use the proper witness type.
+ if c.chanType.IsTaproot() {
+ witnessType = input.TaprootAnchorSweepSpend
+ }
+
+ anchorInput := input.MakeBaseInput(
+ &c.anchor, witnessType, &c.anchorSignDescriptor,
+ c.broadcastHeight, nil,
+ )
+
+ resultChan, err := c.Sweeper.SweepInput(
+ &anchorInput,
+ sweep.Params{
+ // For normal anchor sweeping, the budget is 330 sats.
+ Budget: btcutil.Amount(
+ anchorInput.SignDesc().Output.Value,
+ ),
+
+ // There's no rush to sweep the anchor, so we use a nil
+ // deadline here.
+ DeadlineHeight: fn.None[int32](),
+ },
+ )
+
+ if err != nil {
+ return err
+ }
+
+ c.sweepResultChan = resultChan
+
+ return nil
+}
diff --git a/contractcourt/breach_arbitrator_test.go b/contractcourt/breach_arbitrator_test.go
index bd4ad85683..2001431c79 100644
--- a/contractcourt/breach_arbitrator_test.go
+++ b/contractcourt/breach_arbitrator_test.go
@@ -36,7 +36,7 @@ import (
)
var (
- defaultTimeout = 30 * time.Second
+ defaultTimeout = 10 * time.Second
breachOutPoints = []wire.OutPoint{
{
diff --git a/contractcourt/breach_resolver.go b/contractcourt/breach_resolver.go
index 740b4471d5..a0a342cfc7 100644
--- a/contractcourt/breach_resolver.go
+++ b/contractcourt/breach_resolver.go
@@ -2,6 +2,7 @@ package contractcourt
import (
"encoding/binary"
+ "fmt"
"io"
"github.com/lightningnetwork/lnd/channeldb"
@@ -11,9 +12,6 @@ import (
// future, this will likely take over the duties the current BreachArbitrator
// has.
type breachResolver struct {
- // resolved reflects if the contract has been fully resolved or not.
- resolved bool
-
// subscribed denotes whether or not the breach resolver has subscribed
// to the BreachArbitrator for breach resolution.
subscribed bool
@@ -32,7 +30,7 @@ func newBreachResolver(resCfg ResolverConfig) *breachResolver {
replyChan: make(chan struct{}),
}
- r.initLogger(r)
+ r.initLogger(fmt.Sprintf("%T(%v)", r, r.ChanPoint))
return r
}
@@ -47,7 +45,7 @@ func (b *breachResolver) ResolverKey() []byte {
// been broadcast.
//
// TODO(yy): let sweeper handle the breach inputs.
-func (b *breachResolver) Resolve(_ bool) (ContractResolver, error) {
+func (b *breachResolver) Resolve() (ContractResolver, error) {
if !b.subscribed {
complete, err := b.SubscribeBreachComplete(
&b.ChanPoint, b.replyChan,
@@ -59,7 +57,7 @@ func (b *breachResolver) Resolve(_ bool) (ContractResolver, error) {
// If the breach resolution process is already complete, then
// we can cleanup and checkpoint the resolved state.
if complete {
- b.resolved = true
+ b.resolved.Store(true)
return nil, b.Checkpoint(b)
}
@@ -72,7 +70,7 @@ func (b *breachResolver) Resolve(_ bool) (ContractResolver, error) {
// The replyChan has been closed, signalling that the breach
// has been fully resolved. Checkpoint the resolved state and
// exit.
- b.resolved = true
+ b.resolved.Store(true)
return nil, b.Checkpoint(b)
case <-b.quit:
}
@@ -82,13 +80,14 @@ func (b *breachResolver) Resolve(_ bool) (ContractResolver, error) {
// Stop signals the breachResolver to stop.
func (b *breachResolver) Stop() {
+ b.log.Debugf("stopping...")
close(b.quit)
}
// IsResolved returns true if the breachResolver is fully resolved and cleanup
// can occur.
func (b *breachResolver) IsResolved() bool {
- return b.resolved
+ return b.resolved.Load()
}
// SupplementState adds additional state to the breachResolver.
@@ -97,7 +96,7 @@ func (b *breachResolver) SupplementState(_ *channeldb.OpenChannel) {
// Encode encodes the breachResolver to the passed writer.
func (b *breachResolver) Encode(w io.Writer) error {
- return binary.Write(w, endian, b.resolved)
+ return binary.Write(w, endian, b.resolved.Load())
}
// newBreachResolverFromReader attempts to decode an encoded breachResolver
@@ -110,11 +109,13 @@ func newBreachResolverFromReader(r io.Reader, resCfg ResolverConfig) (
replyChan: make(chan struct{}),
}
- if err := binary.Read(r, endian, &b.resolved); err != nil {
+ var resolved bool
+ if err := binary.Read(r, endian, &resolved); err != nil {
return nil, err
}
+ b.resolved.Store(resolved)
- b.initLogger(b)
+ b.initLogger(fmt.Sprintf("%T(%v)", b, b.ChanPoint))
return b, nil
}
@@ -122,3 +123,16 @@ func newBreachResolverFromReader(r io.Reader, resCfg ResolverConfig) (
// A compile time assertion to ensure breachResolver meets the ContractResolver
// interface.
var _ ContractResolver = (*breachResolver)(nil)
+
+// TODO(yy): implement it once the outputs are offered to the sweeper.
+func (b *breachResolver) Launch() error {
+ if b.launched.Load() {
+ b.log.Tracef("already launched")
+ return nil
+ }
+
+ b.log.Debugf("launching resolver...")
+ b.launched.Store(true)
+
+ return nil
+}
diff --git a/contractcourt/briefcase_test.go b/contractcourt/briefcase_test.go
index 0f44db2abb..ee6e24591e 100644
--- a/contractcourt/briefcase_test.go
+++ b/contractcourt/briefcase_test.go
@@ -206,8 +206,8 @@ func assertResolversEqual(t *testing.T, originalResolver ContractResolver,
ogRes.outputIncubating, diskRes.outputIncubating)
}
if ogRes.resolved != diskRes.resolved {
- t.Fatalf("expected %v, got %v", ogRes.resolved,
- diskRes.resolved)
+ t.Fatalf("expected %v, got %v", ogRes.resolved.Load(),
+ diskRes.resolved.Load())
}
if ogRes.broadcastHeight != diskRes.broadcastHeight {
t.Fatalf("expected %v, got %v",
@@ -229,8 +229,8 @@ func assertResolversEqual(t *testing.T, originalResolver ContractResolver,
ogRes.outputIncubating, diskRes.outputIncubating)
}
if ogRes.resolved != diskRes.resolved {
- t.Fatalf("expected %v, got %v", ogRes.resolved,
- diskRes.resolved)
+ t.Fatalf("expected %v, got %v", ogRes.resolved.Load(),
+ diskRes.resolved.Load())
}
if ogRes.broadcastHeight != diskRes.broadcastHeight {
t.Fatalf("expected %v, got %v",
@@ -275,8 +275,8 @@ func assertResolversEqual(t *testing.T, originalResolver ContractResolver,
ogRes.commitResolution, diskRes.commitResolution)
}
if ogRes.resolved != diskRes.resolved {
- t.Fatalf("expected %v, got %v", ogRes.resolved,
- diskRes.resolved)
+ t.Fatalf("expected %v, got %v", ogRes.resolved.Load(),
+ diskRes.resolved.Load())
}
if ogRes.broadcastHeight != diskRes.broadcastHeight {
t.Fatalf("expected %v, got %v",
@@ -312,13 +312,14 @@ func TestContractInsertionRetrieval(t *testing.T) {
SweepSignDesc: testSignDesc,
},
outputIncubating: true,
- resolved: true,
broadcastHeight: 102,
htlc: channeldb.HTLC{
HtlcIndex: 12,
},
}
- successResolver := htlcSuccessResolver{
+ timeoutResolver.resolved.Store(true)
+
+ successResolver := &htlcSuccessResolver{
htlcResolution: lnwallet.IncomingHtlcResolution{
Preimage: testPreimage,
SignedSuccessTx: nil,
@@ -327,40 +328,49 @@ func TestContractInsertionRetrieval(t *testing.T) {
SweepSignDesc: testSignDesc,
},
outputIncubating: true,
- resolved: true,
broadcastHeight: 109,
htlc: channeldb.HTLC{
RHash: testPreimage,
},
}
- resolvers := []ContractResolver{
- &timeoutResolver,
- &successResolver,
- &commitSweepResolver{
- commitResolution: lnwallet.CommitOutputResolution{
- SelfOutPoint: testChanPoint2,
- SelfOutputSignDesc: testSignDesc,
- MaturityDelay: 99,
- },
- resolved: false,
- broadcastHeight: 109,
- chanPoint: testChanPoint1,
+ successResolver.resolved.Store(true)
+
+ commitResolver := &commitSweepResolver{
+ commitResolution: lnwallet.CommitOutputResolution{
+ SelfOutPoint: testChanPoint2,
+ SelfOutputSignDesc: testSignDesc,
+ MaturityDelay: 99,
},
+ broadcastHeight: 109,
+ chanPoint: testChanPoint1,
+ }
+ commitResolver.resolved.Store(false)
+
+ resolvers := []ContractResolver{
+ &timeoutResolver, successResolver, commitResolver,
}
// All resolvers require a unique ResolverKey() output. To achieve this
// for the composite resolvers, we'll mutate the underlying resolver
// with a new outpoint.
- contestTimeout := timeoutResolver
- contestTimeout.htlcResolution.ClaimOutpoint = randOutPoint()
+ contestTimeout := htlcTimeoutResolver{
+ htlcResolution: lnwallet.OutgoingHtlcResolution{
+ ClaimOutpoint: randOutPoint(),
+ SweepSignDesc: testSignDesc,
+ },
+ }
resolvers = append(resolvers, &htlcOutgoingContestResolver{
htlcTimeoutResolver: &contestTimeout,
})
- contestSuccess := successResolver
- contestSuccess.htlcResolution.ClaimOutpoint = randOutPoint()
+ contestSuccess := &htlcSuccessResolver{
+ htlcResolution: lnwallet.IncomingHtlcResolution{
+ ClaimOutpoint: randOutPoint(),
+ SweepSignDesc: testSignDesc,
+ },
+ }
resolvers = append(resolvers, &htlcIncomingContestResolver{
htlcExpiry: 100,
- htlcSuccessResolver: &contestSuccess,
+ htlcSuccessResolver: contestSuccess,
})
// For quick lookup during the test, we'll create this map which allow
@@ -438,12 +448,12 @@ func TestContractResolution(t *testing.T) {
SweepSignDesc: testSignDesc,
},
outputIncubating: true,
- resolved: true,
broadcastHeight: 192,
htlc: channeldb.HTLC{
HtlcIndex: 9912,
},
}
+ timeoutResolver.resolved.Store(true)
// First, we'll insert the resolver into the database and ensure that
// we get the same resolver out the other side. We do not need to apply
@@ -491,12 +501,13 @@ func TestContractSwapping(t *testing.T) {
SweepSignDesc: testSignDesc,
},
outputIncubating: true,
- resolved: true,
broadcastHeight: 102,
htlc: channeldb.HTLC{
HtlcIndex: 12,
},
}
+ timeoutResolver.resolved.Store(true)
+
contestResolver := &htlcOutgoingContestResolver{
htlcTimeoutResolver: timeoutResolver,
}
diff --git a/contractcourt/chain_arbitrator.go b/contractcourt/chain_arbitrator.go
index c29178b438..d888f3edd1 100644
--- a/contractcourt/chain_arbitrator.go
+++ b/contractcourt/chain_arbitrator.go
@@ -11,6 +11,7 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcwallet/walletdb"
+ "github.com/lightningnetwork/lnd/chainio"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/models"
@@ -244,6 +245,10 @@ type ChainArbitrator struct {
started int32 // To be used atomically.
stopped int32 // To be used atomically.
+ // Embed the blockbeat consumer struct to get access to the method
+ // `NotifyBlockProcessed` and the `BlockbeatChan`.
+ chainio.BeatConsumer
+
sync.Mutex
// activeChannels is a map of all the active contracts that are still
@@ -262,6 +267,9 @@ type ChainArbitrator struct {
// active channels that it must still watch over.
chanSource *channeldb.DB
+ // beat is the current best known blockbeat.
+ beat chainio.Blockbeat
+
quit chan struct{}
wg sync.WaitGroup
@@ -272,15 +280,23 @@ type ChainArbitrator struct {
func NewChainArbitrator(cfg ChainArbitratorConfig,
db *channeldb.DB) *ChainArbitrator {
- return &ChainArbitrator{
+ c := &ChainArbitrator{
cfg: cfg,
activeChannels: make(map[wire.OutPoint]*ChannelArbitrator),
activeWatchers: make(map[wire.OutPoint]*chainWatcher),
chanSource: db,
quit: make(chan struct{}),
}
+
+ // Mount the block consumer.
+ c.BeatConsumer = chainio.NewBeatConsumer(c.quit, c.Name())
+
+ return c
}
+// Compile-time check for the chainio.Consumer interface.
+var _ chainio.Consumer = (*ChainArbitrator)(nil)
+
// arbChannel is a wrapper around an open channel that channel arbitrators
// interact with.
type arbChannel struct {
@@ -526,7 +542,7 @@ func (c *ChainArbitrator) ResolveContract(chanPoint wire.OutPoint) error {
if err := chainArb.Stop(); err != nil {
log.Warnf("unable to stop ChannelArbitrator(%v): %v",
- chanPoint, err)
+ chainArb.id(), err)
}
}
if chainWatcher != nil {
@@ -551,147 +567,27 @@ func (c *ChainArbitrator) ResolveContract(chanPoint wire.OutPoint) error {
}
// Start launches all goroutines that the ChainArbitrator needs to operate.
-func (c *ChainArbitrator) Start() error {
+func (c *ChainArbitrator) Start(beat chainio.Blockbeat) error {
if !atomic.CompareAndSwapInt32(&c.started, 0, 1) {
return nil
}
- log.Infof("ChainArbitrator starting with config: budget=[%v]",
- &c.cfg.Budget)
+ // Set the current beat.
+ c.beat = beat
// First, we'll fetch all the channels that are still open, in order to
// collect them within our set of active contracts.
- openChannels, err := c.chanSource.ChannelStateDB().FetchAllChannels()
- if err != nil {
+ if err := c.loadOpenChannels(); err != nil {
return err
}
- if len(openChannels) > 0 {
- log.Infof("Creating ChannelArbitrators for %v active channels",
- len(openChannels))
- }
-
- // For each open channel, we'll configure then launch a corresponding
- // ChannelArbitrator.
- for _, channel := range openChannels {
- chanPoint := channel.FundingOutpoint
- channel := channel
-
- // First, we'll create an active chainWatcher for this channel
- // to ensure that we detect any relevant on chain events.
- breachClosure := func(ret *lnwallet.BreachRetribution) error {
- return c.cfg.ContractBreach(chanPoint, ret)
- }
-
- chainWatcher, err := newChainWatcher(
- chainWatcherConfig{
- chanState: channel,
- notifier: c.cfg.Notifier,
- signer: c.cfg.Signer,
- isOurAddr: c.cfg.IsOurAddress,
- contractBreach: breachClosure,
- extractStateNumHint: lnwallet.GetStateNumHint,
- auxLeafStore: c.cfg.AuxLeafStore,
- auxResolver: c.cfg.AuxResolver,
- },
- )
- if err != nil {
- return err
- }
-
- c.activeWatchers[chanPoint] = chainWatcher
- channelArb, err := newActiveChannelArbitrator(
- channel, c, chainWatcher.SubscribeChannelEvents(),
- )
- if err != nil {
- return err
- }
-
- c.activeChannels[chanPoint] = channelArb
-
- // Republish any closing transactions for this channel.
- err = c.republishClosingTxs(channel)
- if err != nil {
- log.Errorf("Failed to republish closing txs for "+
- "channel %v", chanPoint)
- }
- }
-
// In addition to the channels that we know to be open, we'll also
// launch arbitrators to finishing resolving any channels that are in
// the pending close state.
- closingChannels, err := c.chanSource.ChannelStateDB().FetchClosedChannels(
- true,
- )
- if err != nil {
+ if err := c.loadPendingCloseChannels(); err != nil {
return err
}
- if len(closingChannels) > 0 {
- log.Infof("Creating ChannelArbitrators for %v closing channels",
- len(closingChannels))
- }
-
- // Next, for each channel is the closing state, we'll launch a
- // corresponding more restricted resolver, as we don't have to watch
- // the chain any longer, only resolve the contracts on the confirmed
- // commitment.
- //nolint:lll
- for _, closeChanInfo := range closingChannels {
- // We can leave off the CloseContract and ForceCloseChan
- // methods as the channel is already closed at this point.
- chanPoint := closeChanInfo.ChanPoint
- arbCfg := ChannelArbitratorConfig{
- ChanPoint: chanPoint,
- ShortChanID: closeChanInfo.ShortChanID,
- ChainArbitratorConfig: c.cfg,
- ChainEvents: &ChainEventSubscription{},
- IsPendingClose: true,
- ClosingHeight: closeChanInfo.CloseHeight,
- CloseType: closeChanInfo.CloseType,
- PutResolverReport: func(tx kvdb.RwTx,
- report *channeldb.ResolverReport) error {
-
- return c.chanSource.PutResolverReport(
- tx, c.cfg.ChainHash, &chanPoint, report,
- )
- },
- FetchHistoricalChannel: func() (*channeldb.OpenChannel, error) {
- chanStateDB := c.chanSource.ChannelStateDB()
- return chanStateDB.FetchHistoricalChannel(&chanPoint)
- },
- FindOutgoingHTLCDeadline: func(
- htlc channeldb.HTLC) fn.Option[int32] {
-
- return c.FindOutgoingHTLCDeadline(
- closeChanInfo.ShortChanID, htlc,
- )
- },
- }
- chanLog, err := newBoltArbitratorLog(
- c.chanSource.Backend, arbCfg, c.cfg.ChainHash, chanPoint,
- )
- if err != nil {
- return err
- }
- arbCfg.MarkChannelResolved = func() error {
- if c.cfg.NotifyFullyResolvedChannel != nil {
- c.cfg.NotifyFullyResolvedChannel(chanPoint)
- }
-
- return c.ResolveContract(chanPoint)
- }
-
- // We create an empty map of HTLC's here since it's possible
- // that the channel is in StateDefault and updateActiveHTLCs is
- // called. We want to avoid writing to an empty map. Since the
- // channel is already in the process of being resolved, no new
- // HTLCs will be added.
- c.activeChannels[chanPoint] = NewChannelArbitrator(
- arbCfg, make(map[HtlcSetKey]htlcSet), chanLog,
- )
- }
-
// Now, we'll start all chain watchers in parallel to shorten start up
// duration. In neutrino mode, this allows spend registrations to take
// advantage of batch spend reporting, instead of doing a single rescan
@@ -743,7 +639,7 @@ func (c *ChainArbitrator) Start() error {
// transaction.
var startStates map[wire.OutPoint]*chanArbStartState
- err = kvdb.View(c.chanSource, func(tx walletdb.ReadTx) error {
+ err := kvdb.View(c.chanSource, func(tx walletdb.ReadTx) error {
for _, arbitrator := range c.activeChannels {
startState, err := arbitrator.getStartState(tx)
if err != nil {
@@ -775,119 +671,45 @@ func (c *ChainArbitrator) Start() error {
arbitrator.cfg.ChanPoint)
}
- if err := arbitrator.Start(startState); err != nil {
+ if err := arbitrator.Start(startState, c.beat); err != nil {
stopAndLog()
return err
}
}
- // Subscribe to a single stream of block epoch notifications that we
- // will dispatch to all active arbitrators.
- blockEpoch, err := c.cfg.Notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- return err
- }
-
// Start our goroutine which will dispatch blocks to each arbitrator.
c.wg.Add(1)
go func() {
defer c.wg.Done()
- c.dispatchBlocks(blockEpoch)
+ c.dispatchBlocks()
}()
+ log.Infof("ChainArbitrator starting at height %d with %d chain "+
+ "watchers, %d channel arbitrators, and budget config=[%v]",
+ c.beat.Height(), len(c.activeWatchers), len(c.activeChannels),
+ &c.cfg.Budget)
+
// TODO(roasbeef): eventually move all breach watching here
return nil
}
-// blockRecipient contains the information we need to dispatch a block to a
-// channel arbitrator.
-type blockRecipient struct {
- // chanPoint is the funding outpoint of the channel.
- chanPoint wire.OutPoint
-
- // blocks is the channel that new block heights are sent into. This
- // channel should be sufficiently buffered as to not block the sender.
- blocks chan<- int32
-
- // quit is closed if the receiving entity is shutting down.
- quit chan struct{}
-}
-
// dispatchBlocks consumes a block epoch notification stream and dispatches
// blocks to each of the chain arb's active channel arbitrators. This function
// must be run in a goroutine.
-func (c *ChainArbitrator) dispatchBlocks(
- blockEpoch *chainntnfs.BlockEpochEvent) {
-
- // getRecipients is a helper function which acquires the chain arb
- // lock and returns a set of block recipients which can be used to
- // dispatch blocks.
- getRecipients := func() []blockRecipient {
- c.Lock()
- blocks := make([]blockRecipient, 0, len(c.activeChannels))
- for _, channel := range c.activeChannels {
- blocks = append(blocks, blockRecipient{
- chanPoint: channel.cfg.ChanPoint,
- blocks: channel.blocks,
- quit: channel.quit,
- })
- }
- c.Unlock()
-
- return blocks
- }
-
- // On exit, cancel our blocks subscription and close each block channel
- // so that the arbitrators know they will no longer be receiving blocks.
- defer func() {
- blockEpoch.Cancel()
-
- recipients := getRecipients()
- for _, recipient := range recipients {
- close(recipient.blocks)
- }
- }()
-
+func (c *ChainArbitrator) dispatchBlocks() {
// Consume block epochs until we receive the instruction to shutdown.
for {
select {
// Consume block epochs, exiting if our subscription is
// terminated.
- case block, ok := <-blockEpoch.Epochs:
- if !ok {
- log.Trace("dispatchBlocks block epoch " +
- "cancelled")
- return
- }
+ case beat := <-c.BlockbeatChan:
+ // Set the current blockbeat.
+ c.beat = beat
- // Get the set of currently active channels block
- // subscription channels and dispatch the block to
- // each.
- for _, recipient := range getRecipients() {
- select {
- // Deliver the block to the arbitrator.
- case recipient.blocks <- block.Height:
-
- // If the recipient is shutting down, exit
- // without delivering the block. This may be
- // the case when two blocks are mined in quick
- // succession, and the arbitrator resolves
- // after the first block, and does not need to
- // consume the second block.
- case <-recipient.quit:
- log.Debugf("channel: %v exit without "+
- "receiving block: %v",
- recipient.chanPoint,
- block.Height)
-
- // If the chain arb is shutting down, we don't
- // need to deliver any more blocks (everything
- // will be shutting down).
- case <-c.quit:
- return
- }
- }
+ // Send this blockbeat to all the active channels and
+ // wait for them to finish processing it.
+ c.handleBlockbeat(beat)
// Exit if the chain arbitrator is shutting down.
case <-c.quit:
@@ -896,6 +718,47 @@ func (c *ChainArbitrator) dispatchBlocks(
}
}
+// handleBlockbeat sends the blockbeat to all active channel arbitrator in
+// parallel and wait for them to finish processing it.
+func (c *ChainArbitrator) handleBlockbeat(beat chainio.Blockbeat) {
+ // Read the active channels in a lock.
+ c.Lock()
+
+ // Create a slice to record active channel arbitrator.
+ channels := make([]chainio.Consumer, 0, len(c.activeChannels))
+ watchers := make([]chainio.Consumer, 0, len(c.activeWatchers))
+
+ // Copy the active channels to the slice.
+ for _, channel := range c.activeChannels {
+ channels = append(channels, channel)
+ }
+
+ for _, watcher := range c.activeWatchers {
+ watchers = append(watchers, watcher)
+ }
+
+ c.Unlock()
+
+ // Iterate all the copied watchers and send the blockbeat to them.
+ err := chainio.DispatchConcurrent(beat, watchers)
+ if err != nil {
+ log.Errorf("Notify blockbeat for chainWatcher failed: %v", err)
+ }
+
+ // Iterate all the copied channels and send the blockbeat to them.
+ //
+ // NOTE: This method will timeout if the processing of blocks of the
+ // subsystems is too long (60s).
+ err = chainio.DispatchConcurrent(beat, channels)
+ if err != nil {
+ log.Errorf("Notify blockbeat for ChannelArbitrator failed: %v",
+ err)
+ }
+
+ // Notify the chain arbitrator has processed the block.
+ c.NotifyBlockProcessed(beat, err)
+}
+
// republishClosingTxs will load any stored cooperative or unilateral closing
// transactions and republish them. This helps ensure propagation of the
// transactions in the event that prior publications failed.
@@ -1023,7 +886,7 @@ func (c *ChainArbitrator) Stop() error {
}
for chanPoint, arbitrator := range activeChannels {
log.Tracef("Attempting to stop ChannelArbitrator(%v)",
- chanPoint)
+ arbitrator.id())
if err := arbitrator.Stop(); err != nil {
log.Errorf("unable to stop arbitrator for "+
@@ -1197,8 +1060,8 @@ func (c *ChainArbitrator) WatchNewChannel(newChan *channeldb.OpenChannel) error
chanPoint := newChan.FundingOutpoint
- log.Infof("Creating new ChannelArbitrator for ChannelPoint(%v)",
- chanPoint)
+ log.Infof("Creating new Chainwatcher and ChannelArbitrator for "+
+ "ChannelPoint(%v)", newChan.FundingOutpoint)
// If we're already watching this channel, then we'll ignore this
// request.
@@ -1245,7 +1108,7 @@ func (c *ChainArbitrator) WatchNewChannel(newChan *channeldb.OpenChannel) error
// arbitrators, then launch it.
c.activeChannels[chanPoint] = channelArb
- if err := channelArb.Start(nil); err != nil {
+ if err := channelArb.Start(nil, c.beat); err != nil {
return err
}
@@ -1340,8 +1203,9 @@ func (c *ChainArbitrator) FindOutgoingHTLCDeadline(scid lnwire.ShortChannelID,
log.Debugf("ChannelArbitrator(%v): found "+
"incoming HTLC in channel=%v using "+
- "rHash=%x, refundTimeout=%v", scid,
- cp, rHash, htlc.RefundTimeout)
+ "rHash=%x, refundTimeout=%v",
+ channelArb.id(), cp, rHash,
+ htlc.RefundTimeout)
return fn.Some(int32(htlc.RefundTimeout))
}
@@ -1358,3 +1222,191 @@ func (c *ChainArbitrator) FindOutgoingHTLCDeadline(scid lnwire.ShortChannelID,
// TODO(roasbeef): arbitration reports
// * types: contested, waiting for success conf, etc
+
+// NOTE: part of the `chainio.Consumer` interface.
+func (c *ChainArbitrator) Name() string {
+ return "ChainArbitrator"
+}
+
+// loadOpenChannels loads all channels that are currently open in the database
+// and registers them with the chainWatcher for future notification.
+func (c *ChainArbitrator) loadOpenChannels() error {
+ openChannels, err := c.chanSource.ChannelStateDB().FetchAllChannels()
+ if err != nil {
+ return err
+ }
+
+ if len(openChannels) == 0 {
+ return nil
+ }
+
+ log.Infof("Creating ChannelArbitrators for %v active channels",
+ len(openChannels))
+
+ // For each open channel, we'll configure then launch a corresponding
+ // ChannelArbitrator.
+ for _, channel := range openChannels {
+ chanPoint := channel.FundingOutpoint
+ channel := channel
+
+ // First, we'll create an active chainWatcher for this channel
+ // to ensure that we detect any relevant on chain events.
+ breachClosure := func(ret *lnwallet.BreachRetribution) error {
+ return c.cfg.ContractBreach(chanPoint, ret)
+ }
+
+ chainWatcher, err := newChainWatcher(
+ chainWatcherConfig{
+ chanState: channel,
+ notifier: c.cfg.Notifier,
+ signer: c.cfg.Signer,
+ isOurAddr: c.cfg.IsOurAddress,
+ contractBreach: breachClosure,
+ extractStateNumHint: lnwallet.GetStateNumHint,
+ auxLeafStore: c.cfg.AuxLeafStore,
+ auxResolver: c.cfg.AuxResolver,
+ },
+ )
+ if err != nil {
+ return err
+ }
+
+ c.activeWatchers[chanPoint] = chainWatcher
+ channelArb, err := newActiveChannelArbitrator(
+ channel, c, chainWatcher.SubscribeChannelEvents(),
+ )
+ if err != nil {
+ return err
+ }
+
+ c.activeChannels[chanPoint] = channelArb
+
+ // Republish any closing transactions for this channel.
+ err = c.republishClosingTxs(channel)
+ if err != nil {
+ log.Errorf("Failed to republish closing txs for "+
+ "channel %v", chanPoint)
+ }
+ }
+
+ return nil
+}
+
+// loadPendingCloseChannels loads all channels that are currently pending
+// closure in the database and registers them with the ChannelArbitrator to
+// continue the resolution process.
+func (c *ChainArbitrator) loadPendingCloseChannels() error {
+ chanStateDB := c.chanSource.ChannelStateDB()
+
+ closingChannels, err := chanStateDB.FetchClosedChannels(true)
+ if err != nil {
+ return err
+ }
+
+ if len(closingChannels) == 0 {
+ return nil
+ }
+
+ log.Infof("Creating ChannelArbitrators for %v closing channels",
+ len(closingChannels))
+
+ // Next, for each channel is the closing state, we'll launch a
+ // corresponding more restricted resolver, as we don't have to watch
+ // the chain any longer, only resolve the contracts on the confirmed
+ // commitment.
+ //nolint:lll
+ for _, closeChanInfo := range closingChannels {
+ // We can leave off the CloseContract and ForceCloseChan
+ // methods as the channel is already closed at this point.
+ chanPoint := closeChanInfo.ChanPoint
+ arbCfg := ChannelArbitratorConfig{
+ ChanPoint: chanPoint,
+ ShortChanID: closeChanInfo.ShortChanID,
+ ChainArbitratorConfig: c.cfg,
+ ChainEvents: &ChainEventSubscription{},
+ IsPendingClose: true,
+ ClosingHeight: closeChanInfo.CloseHeight,
+ CloseType: closeChanInfo.CloseType,
+ PutResolverReport: func(tx kvdb.RwTx,
+ report *channeldb.ResolverReport) error {
+
+ return c.chanSource.PutResolverReport(
+ tx, c.cfg.ChainHash, &chanPoint, report,
+ )
+ },
+ FetchHistoricalChannel: func() (*channeldb.OpenChannel, error) {
+ return chanStateDB.FetchHistoricalChannel(&chanPoint)
+ },
+ FindOutgoingHTLCDeadline: func(
+ htlc channeldb.HTLC) fn.Option[int32] {
+
+ return c.FindOutgoingHTLCDeadline(
+ closeChanInfo.ShortChanID, htlc,
+ )
+ },
+ }
+ chanLog, err := newBoltArbitratorLog(
+ c.chanSource.Backend, arbCfg, c.cfg.ChainHash, chanPoint,
+ )
+ if err != nil {
+ return err
+ }
+ arbCfg.MarkChannelResolved = func() error {
+ if c.cfg.NotifyFullyResolvedChannel != nil {
+ c.cfg.NotifyFullyResolvedChannel(chanPoint)
+ }
+
+ return c.ResolveContract(chanPoint)
+ }
+
+ // We create an empty map of HTLC's here since it's possible
+ // that the channel is in StateDefault and updateActiveHTLCs is
+ // called. We want to avoid writing to an empty map. Since the
+ // channel is already in the process of being resolved, no new
+ // HTLCs will be added.
+ c.activeChannels[chanPoint] = NewChannelArbitrator(
+ arbCfg, make(map[HtlcSetKey]htlcSet), chanLog,
+ )
+ }
+
+ return nil
+}
+
+// RedispatchBlockbeat resends the current blockbeat to the channels specified
+// by the chanPoints. It is used when a channel is added to the chain
+// arbitrator after it has been started, e.g., during the channel restore
+// process.
+func (c *ChainArbitrator) RedispatchBlockbeat(chanPoints []wire.OutPoint) {
+ // Get the current blockbeat.
+ beat := c.beat
+
+ // Prepare two sets of consumers.
+ channels := make([]chainio.Consumer, 0, len(chanPoints))
+ watchers := make([]chainio.Consumer, 0, len(chanPoints))
+
+ // Read the active channels in a lock.
+ c.Lock()
+ for _, op := range chanPoints {
+ if channel, ok := c.activeChannels[op]; ok {
+ channels = append(channels, channel)
+ }
+
+ if watcher, ok := c.activeWatchers[op]; ok {
+ watchers = append(watchers, watcher)
+ }
+ }
+ c.Unlock()
+
+ // Iterate all the copied watchers and send the blockbeat to them.
+ err := chainio.DispatchConcurrent(beat, watchers)
+ if err != nil {
+ log.Errorf("Notify blockbeat failed: %v", err)
+ }
+
+ // Iterate all the copied channels and send the blockbeat to them.
+ err = chainio.DispatchConcurrent(beat, channels)
+ if err != nil {
+ // Shutdown lnd if there's an error processing the block.
+ log.Errorf("Notify blockbeat failed: %v", err)
+ }
+}
diff --git a/contractcourt/chain_arbitrator_test.go b/contractcourt/chain_arbitrator_test.go
index abaca5c2ba..a6b60a9a21 100644
--- a/contractcourt/chain_arbitrator_test.go
+++ b/contractcourt/chain_arbitrator_test.go
@@ -83,7 +83,6 @@ func TestChainArbitratorRepublishCloses(t *testing.T) {
ChainIO: &mock.ChainIO{},
Notifier: &mock.ChainNotifier{
SpendChan: make(chan *chainntnfs.SpendDetail),
- EpochChan: make(chan *chainntnfs.BlockEpoch),
ConfChan: make(chan *chainntnfs.TxConfirmation),
},
PublishTx: func(tx *wire.MsgTx, _ string) error {
@@ -97,7 +96,8 @@ func TestChainArbitratorRepublishCloses(t *testing.T) {
chainArbCfg, db,
)
- if err := chainArb.Start(); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := chainArb.Start(beat); err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
@@ -168,7 +168,6 @@ func TestResolveContract(t *testing.T) {
ChainIO: &mock.ChainIO{},
Notifier: &mock.ChainNotifier{
SpendChan: make(chan *chainntnfs.SpendDetail),
- EpochChan: make(chan *chainntnfs.BlockEpoch),
ConfChan: make(chan *chainntnfs.TxConfirmation),
},
PublishTx: func(tx *wire.MsgTx, _ string) error {
@@ -185,7 +184,8 @@ func TestResolveContract(t *testing.T) {
chainArb := NewChainArbitrator(
chainArbCfg, db,
)
- if err := chainArb.Start(); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := chainArb.Start(beat); err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
diff --git a/contractcourt/chain_watcher.go b/contractcourt/chain_watcher.go
index 64307dd020..7265dd0578 100644
--- a/contractcourt/chain_watcher.go
+++ b/contractcourt/chain_watcher.go
@@ -16,6 +16,7 @@ import (
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
+ "github.com/lightningnetwork/lnd/chainio"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/fn"
@@ -210,6 +211,10 @@ type chainWatcher struct {
started int32 // To be used atomically.
stopped int32 // To be used atomically.
+ // Embed the blockbeat consumer struct to get access to the method
+ // `NotifyBlockProcessed` and the `BlockbeatChan`.
+ chainio.BeatConsumer
+
quit chan struct{}
wg sync.WaitGroup
@@ -236,6 +241,10 @@ type chainWatcher struct {
// clientSubscriptions is a map that keeps track of all the active
// client subscriptions for events related to this channel.
clientSubscriptions map[uint64]*ChainEventSubscription
+
+ // fundingSpendNtfn is the spending notification subscription for the
+ // funding outpoint.
+ fundingSpendNtfn *chainntnfs.SpendEvent
}
// newChainWatcher returns a new instance of a chainWatcher for a channel given
@@ -260,12 +269,48 @@ func newChainWatcher(cfg chainWatcherConfig) (*chainWatcher, error) {
)
}
- return &chainWatcher{
+ // Get the witness program for the funding output.
+ fundingPkScript, err := deriveFundingPkScript(chanState)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the channel opening block height.
+ heightHint := deriveHeightHint(chanState)
+
+ // We'll register for a notification to be dispatched if the funding
+ // output is spent.
+ spendNtfn, err := cfg.notifier.RegisterSpendNtfn(
+ &chanState.FundingOutpoint, fundingPkScript, heightHint,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ c := &chainWatcher{
cfg: cfg,
stateHintObfuscator: stateHint,
quit: make(chan struct{}),
clientSubscriptions: make(map[uint64]*ChainEventSubscription),
- }, nil
+ fundingPkScript: fundingPkScript,
+ heightHint: heightHint,
+ fundingSpendNtfn: spendNtfn,
+ }
+
+ // Mount the block consumer.
+ c.BeatConsumer = chainio.NewBeatConsumer(c.quit, c.Name())
+
+ return c, nil
+}
+
+// Compile-time check for the chainio.Consumer interface.
+var _ chainio.Consumer = (*chainWatcher)(nil)
+
+// Name returns the name of the watcher.
+//
+// NOTE: part of the `chainio.Consumer` interface.
+func (c *chainWatcher) Name() string {
+ return fmt.Sprintf("ChainWatcher(%v)", c.cfg.chanState.FundingOutpoint)
}
// Start starts all goroutines that the chainWatcher needs to perform its
@@ -275,75 +320,11 @@ func (c *chainWatcher) Start() error {
return nil
}
- chanState := c.cfg.chanState
log.Debugf("Starting chain watcher for ChannelPoint(%v)",
- chanState.FundingOutpoint)
-
- // First, we'll register for a notification to be dispatched if the
- // funding output is spent.
- fundingOut := &chanState.FundingOutpoint
-
- // As a height hint, we'll try to use the opening height, but if the
- // channel isn't yet open, then we'll use the height it was broadcast
- // at. This may be an unconfirmed zero-conf channel.
- c.heightHint = c.cfg.chanState.ShortChanID().BlockHeight
- if c.heightHint == 0 {
- c.heightHint = chanState.BroadcastHeight()
- }
-
- // Since no zero-conf state is stored in a channel backup, the below
- // logic will not be triggered for restored, zero-conf channels. Set
- // the height hint for zero-conf channels.
- if chanState.IsZeroConf() {
- if chanState.ZeroConfConfirmed() {
- // If the zero-conf channel is confirmed, we'll use the
- // confirmed SCID's block height.
- c.heightHint = chanState.ZeroConfRealScid().BlockHeight
- } else {
- // The zero-conf channel is unconfirmed. We'll need to
- // use the FundingBroadcastHeight.
- c.heightHint = chanState.BroadcastHeight()
- }
- }
-
- localKey := chanState.LocalChanCfg.MultiSigKey.PubKey
- remoteKey := chanState.RemoteChanCfg.MultiSigKey.PubKey
-
- var (
- err error
- )
- if chanState.ChanType.IsTaproot() {
- c.fundingPkScript, _, err = input.GenTaprootFundingScript(
- localKey, remoteKey, 0, chanState.TapscriptRoot,
- )
- if err != nil {
- return err
- }
- } else {
- multiSigScript, err := input.GenMultiSigScript(
- localKey.SerializeCompressed(),
- remoteKey.SerializeCompressed(),
- )
- if err != nil {
- return err
- }
- c.fundingPkScript, err = input.WitnessScriptHash(multiSigScript)
- if err != nil {
- return err
- }
- }
-
- spendNtfn, err := c.cfg.notifier.RegisterSpendNtfn(
- fundingOut, c.fundingPkScript, c.heightHint,
- )
- if err != nil {
- return err
- }
+ c.cfg.chanState.FundingOutpoint)
- // With the spend notification obtained, we'll now dispatch the
- // closeObserver which will properly react to any changes.
c.wg.Add(1)
- go c.closeObserver(spendNtfn)
+ go c.closeObserver()
return nil
}
@@ -555,7 +536,7 @@ func newChainSet(chanState *channeldb.OpenChannel) (*chainSet, error) {
localCommit, remoteCommit, err := chanState.LatestCommitments()
if err != nil {
return nil, fmt.Errorf("unable to fetch channel state for "+
- "chan_point=%v", chanState.FundingOutpoint)
+ "chan_point=%v: %v", chanState.FundingOutpoint, err)
}
log.Tracef("ChannelPoint(%v): local_commit_type=%v, local_commit=%v",
@@ -622,167 +603,44 @@ func newChainSet(chanState *channeldb.OpenChannel) (*chainSet, error) {
// close observer will assembled the proper materials required to claim the
// funds of the channel on-chain (if required), then dispatch these as
// notifications to all subscribers.
-func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
+func (c *chainWatcher) closeObserver() {
defer c.wg.Done()
+ defer c.fundingSpendNtfn.Cancel()
log.Infof("Close observer for ChannelPoint(%v) active",
c.cfg.chanState.FundingOutpoint)
- // If this is a taproot channel, before we proceed, we want to ensure
- // that the expected funding output has confirmed on chain.
- if c.cfg.chanState.ChanType.IsTaproot() {
- fundingPoint := c.cfg.chanState.FundingOutpoint
-
- confNtfn, err := c.cfg.notifier.RegisterConfirmationsNtfn(
- &fundingPoint.Hash, c.fundingPkScript, 1, c.heightHint,
- )
- if err != nil {
- log.Warnf("unable to register for conf: %v", err)
- }
-
- log.Infof("Waiting for taproot ChannelPoint(%v) to confirm...",
- c.cfg.chanState.FundingOutpoint)
-
+ for {
select {
- case _, ok := <-confNtfn.Confirmed:
+ // A new block is received, we will check whether this block
+ // contains a spending tx that we are interested in.
+ case beat := <-c.BlockbeatChan:
+ log.Debugf("ChainWatcher(%v) received blockbeat %v",
+ c.cfg.chanState.FundingOutpoint, beat.Height())
+
+ // Process the block.
+ c.handleBlockbeat(beat)
+
+ // If the funding outpoint is spent, we now go ahead and handle
+ // it.
+ case spend, ok := <-c.fundingSpendNtfn.Spend:
// If the channel was closed, then this means that the
// notifier exited, so we will as well.
if !ok {
return
}
- case <-c.quit:
- return
- }
- }
-
- select {
- // We've detected a spend of the channel onchain! Depending on the type
- // of spend, we'll act accordingly, so we'll examine the spending
- // transaction to determine what we should do.
- //
- // TODO(Roasbeef): need to be able to ensure this only triggers
- // on confirmation, to ensure if multiple txns are broadcast, we
- // act on the one that's timestamped
- case commitSpend, ok := <-spendNtfn.Spend:
- // If the channel was closed, then this means that the notifier
- // exited, so we will as well.
- if !ok {
- return
- }
-
- // Otherwise, the remote party might have broadcast a prior
- // revoked state...!!!
- commitTxBroadcast := commitSpend.SpendingTx
-
- // First, we'll construct the chainset which includes all the
- // data we need to dispatch an event to our subscribers about
- // this possible channel close event.
- chainSet, err := newChainSet(c.cfg.chanState)
- if err != nil {
- log.Errorf("unable to create commit set: %v", err)
- return
- }
-
- // Decode the state hint encoded within the commitment
- // transaction to determine if this is a revoked state or not.
- obfuscator := c.stateHintObfuscator
- broadcastStateNum := c.cfg.extractStateNumHint(
- commitTxBroadcast, obfuscator,
- )
-
- // We'll go on to check whether it could be our own commitment
- // that was published and know is confirmed.
- ok, err = c.handleKnownLocalState(
- commitSpend, broadcastStateNum, chainSet,
- )
- if err != nil {
- log.Errorf("Unable to handle known local state: %v",
- err)
- return
- }
- if ok {
- return
- }
-
- // Now that we know it is neither a non-cooperative closure nor
- // a local close with the latest state, we check if it is the
- // remote that closed with any prior or current state.
- ok, err = c.handleKnownRemoteState(
- commitSpend, broadcastStateNum, chainSet,
- )
- if err != nil {
- log.Errorf("Unable to handle known remote state: %v",
- err)
- return
- }
-
- if ok {
- return
- }
-
- // Next, we'll check to see if this is a cooperative channel
- // closure or not. This is characterized by having an input
- // sequence number that's finalized. This won't happen with
- // regular commitment transactions due to the state hint
- // encoding scheme.
- switch commitTxBroadcast.TxIn[0].Sequence {
- case wire.MaxTxInSequenceNum:
- fallthrough
- case mempool.MaxRBFSequence:
- // TODO(roasbeef): rare but possible, need itest case
- // for
- err := c.dispatchCooperativeClose(commitSpend)
+ err := c.handleCommitSpend(spend)
if err != nil {
- log.Errorf("unable to handle co op close: %v", err)
+ log.Errorf("Failed to handle commit spend: %v",
+ err)
}
- return
- }
-
- log.Warnf("Unknown commitment broadcast for "+
- "ChannelPoint(%v) ", c.cfg.chanState.FundingOutpoint)
- // We'll try to recover as best as possible from losing state.
- // We first check if this was a local unknown state. This could
- // happen if we force close, then lose state or attempt
- // recovery before the commitment confirms.
- ok, err = c.handleUnknownLocalState(
- commitSpend, broadcastStateNum, chainSet,
- )
- if err != nil {
- log.Errorf("Unable to handle known local state: %v",
- err)
- return
- }
-
- if ok {
- return
- }
-
- // Since it was neither a known remote state, nor a local state
- // that was published, it most likely mean we lost state and
- // the remote node closed. In this case we must start the DLP
- // protocol in hope of getting our money back.
- ok, err = c.handleUnknownRemoteState(
- commitSpend, broadcastStateNum, chainSet,
- )
- if err != nil {
- log.Errorf("Unable to handle unknown remote state: %v",
- err)
- return
- }
-
- if ok {
+ // The chainWatcher has been signalled to exit, so we'll do so
+ // now.
+ case <-c.quit:
return
}
-
- log.Warnf("Unable to handle spending tx %v of channel point %v",
- commitTxBroadcast.TxHash(), c.cfg.chanState.FundingOutpoint)
- return
-
- // The chainWatcher has been signalled to exit, so we'll do so now.
- case <-c.quit:
- return
}
}
@@ -1399,3 +1257,264 @@ func (c *chainWatcher) waitForCommitmentPoint() *btcec.PublicKey {
}
}
}
+
+// deriveFundingPkScript derives the script used in the funding output.
+func deriveFundingPkScript(chanState *channeldb.OpenChannel) ([]byte, error) {
+ localKey := chanState.LocalChanCfg.MultiSigKey.PubKey
+ remoteKey := chanState.RemoteChanCfg.MultiSigKey.PubKey
+
+ var (
+ err error
+ fundingPkScript []byte
+ )
+
+ if chanState.ChanType.IsTaproot() {
+ fundingPkScript, _, err = input.GenTaprootFundingScript(
+ localKey, remoteKey, 0, chanState.TapscriptRoot,
+ )
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ multiSigScript, err := input.GenMultiSigScript(
+ localKey.SerializeCompressed(),
+ remoteKey.SerializeCompressed(),
+ )
+ if err != nil {
+ return nil, err
+ }
+ fundingPkScript, err = input.WitnessScriptHash(multiSigScript)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return fundingPkScript, nil
+}
+
+// deriveHeightHint derives the block height for the channel opening.
+func deriveHeightHint(chanState *channeldb.OpenChannel) uint32 {
+ // As a height hint, we'll try to use the opening height, but if the
+ // channel isn't yet open, then we'll use the height it was broadcast
+ // at. This may be an unconfirmed zero-conf channel.
+ heightHint := chanState.ShortChanID().BlockHeight
+ if heightHint == 0 {
+ heightHint = chanState.BroadcastHeight()
+ }
+
+ // Since no zero-conf state is stored in a channel backup, the below
+ // logic will not be triggered for restored, zero-conf channels. Set
+ // the height hint for zero-conf channels.
+ if chanState.IsZeroConf() {
+ if chanState.ZeroConfConfirmed() {
+ // If the zero-conf channel is confirmed, we'll use the
+ // confirmed SCID's block height.
+ heightHint = chanState.ZeroConfRealScid().BlockHeight
+ } else {
+ // The zero-conf channel is unconfirmed. We'll need to
+ // use the FundingBroadcastHeight.
+ heightHint = chanState.BroadcastHeight()
+ }
+ }
+
+ return heightHint
+}
+
+// handleCommitSpend takes a spending tx of the funding output and handles the
+// channel close based on the closure type.
+func (c *chainWatcher) handleCommitSpend(
+ commitSpend *chainntnfs.SpendDetail) error {
+
+ commitTxBroadcast := commitSpend.SpendingTx
+
+ // First, we'll construct the chainset which includes all the data we
+ // need to dispatch an event to our subscribers about this possible
+ // channel close event.
+ chainSet, err := newChainSet(c.cfg.chanState)
+ if err != nil {
+ return fmt.Errorf("create commit set: %w", err)
+ }
+
+ // Decode the state hint encoded within the commitment transaction to
+ // determine if this is a revoked state or not.
+ obfuscator := c.stateHintObfuscator
+ broadcastStateNum := c.cfg.extractStateNumHint(
+ commitTxBroadcast, obfuscator,
+ )
+
+ // We'll go on to check whether it could be our own commitment that was
+ // published and know is confirmed.
+ ok, err := c.handleKnownLocalState(
+ commitSpend, broadcastStateNum, chainSet,
+ )
+ if err != nil {
+ return fmt.Errorf("handle known local state: %w", err)
+ }
+ if ok {
+ return nil
+ }
+
+ // Now that we know it is neither a non-cooperative closure nor a local
+ // close with the latest state, we check if it is the remote that
+ // closed with any prior or current state.
+ ok, err = c.handleKnownRemoteState(
+ commitSpend, broadcastStateNum, chainSet,
+ )
+ if err != nil {
+ return fmt.Errorf("handle known remote state: %w", err)
+ }
+ if ok {
+ return nil
+ }
+
+ // Next, we'll check to see if this is a cooperative channel closure or
+ // not. This is characterized by having an input sequence number that's
+ // finalized. This won't happen with regular commitment transactions
+ // due to the state hint encoding scheme.
+ switch commitTxBroadcast.TxIn[0].Sequence {
+ case wire.MaxTxInSequenceNum:
+ fallthrough
+ case mempool.MaxRBFSequence:
+ // TODO(roasbeef): rare but possible, need itest case for
+ err := c.dispatchCooperativeClose(commitSpend)
+ if err != nil {
+ return fmt.Errorf("handle coop close: %w", err)
+ }
+
+ return nil
+ }
+
+ log.Warnf("Unknown commitment broadcast for ChannelPoint(%v) ",
+ c.cfg.chanState.FundingOutpoint)
+
+ // We'll try to recover as best as possible from losing state. We
+ // first check if this was a local unknown state. This could happen if
+ // we force close, then lose state or attempt recovery before the
+ // commitment confirms.
+ ok, err = c.handleUnknownLocalState(
+ commitSpend, broadcastStateNum, chainSet,
+ )
+ if err != nil {
+ return fmt.Errorf("handle known local state: %w", err)
+ }
+ if ok {
+ return nil
+ }
+
+ // Since it was neither a known remote state, nor a local state that
+ // was published, it most likely mean we lost state and the remote node
+ // closed. In this case we must start the DLP protocol in hope of
+ // getting our money back.
+ ok, err = c.handleUnknownRemoteState(
+ commitSpend, broadcastStateNum, chainSet,
+ )
+ if err != nil {
+ return fmt.Errorf("handle unknown remote state: %w", err)
+ }
+ if ok {
+ return nil
+ }
+
+ log.Errorf("Unable to handle spending tx %v of channel point %v",
+ commitTxBroadcast.TxHash(), c.cfg.chanState.FundingOutpoint)
+
+ return nil
+}
+
+// checkFundingSpend performs a non-blocking read on the spendNtfn channel to
+// check whether there's a commit spend already. Returns the spend details if
+// found.
+func (c *chainWatcher) checkFundingSpend() *chainntnfs.SpendDetail {
+ select {
+ // We've detected a spend of the channel onchain! Depending on the type
+ // of spend, we'll act accordingly, so we'll examine the spending
+ // transaction to determine what we should do.
+ //
+ // TODO(Roasbeef): need to be able to ensure this only triggers
+ // on confirmation, to ensure if multiple txns are broadcast, we
+ // act on the one that's timestamped
+ case spend, ok := <-c.fundingSpendNtfn.Spend:
+ // If the channel was closed, then this means that the notifier
+ // exited, so we will as well.
+ if !ok {
+ return nil
+ }
+
+ log.Debugf("Found spend details for funding output: %v",
+ spend.SpenderTxHash)
+
+ return spend
+
+ default:
+ }
+
+ return nil
+}
+
+// chanPointConfirmed checks whether the given channel point has confirmed.
+// This is used to ensure that the funding output has confirmed on chain before
+// we proceed with the rest of the close observer logic for taproot channels.
+func (c *chainWatcher) chanPointConfirmed() bool {
+ op := c.cfg.chanState.FundingOutpoint
+ confNtfn, err := c.cfg.notifier.RegisterConfirmationsNtfn(
+ &op.Hash, c.fundingPkScript, 1, c.heightHint,
+ )
+ if err != nil {
+ log.Errorf("Unable to register for conf: %v", err)
+
+ return false
+ }
+
+ select {
+ case _, ok := <-confNtfn.Confirmed:
+ // If the channel was closed, then this means that the notifier
+ // exited, so we will as well.
+ if !ok {
+ return false
+ }
+
+ log.Debugf("Taproot ChannelPoint(%v) confirmed", op)
+
+ return true
+
+ default:
+ log.Infof("Taproot ChannelPoint(%v) not confirmed yet", op)
+
+ return false
+ }
+}
+
+// handleBlockbeat takes a blockbeat and queries for a spending tx for the
+// funding output. If the spending tx is found, it will be handled based on the
+// closure type.
+func (c *chainWatcher) handleBlockbeat(beat chainio.Blockbeat) {
+ // Notify the chain arbitrator has processed the block.
+ defer c.NotifyBlockProcessed(beat, nil)
+
+ // If this is a taproot channel, before we proceed, we want to ensure
+ // that the expected funding output has confirmed on chain.
+ if c.cfg.chanState.IsPending && c.cfg.chanState.ChanType.IsTaproot() {
+ // If the funding output hasn't confirmed in this block, we
+ // will check it again in the next block.
+ if !c.chanPointConfirmed() {
+ return
+ }
+ }
+
+ // Perform a non-blocking read to check whether the funding output was
+ // spent.
+ spend := c.checkFundingSpend()
+ if spend == nil {
+ log.Tracef("No spend found for ChannelPoint(%v) in block %v",
+ c.cfg.chanState.FundingOutpoint, beat.Height())
+
+ return
+ }
+
+ // The funding output was spent, we now handle it by sending a close
+ // event to the channel arbitrator.
+ err := c.handleCommitSpend(spend)
+ if err != nil {
+ log.Errorf("Failed to handle commit spend: %v", err)
+ }
+}
diff --git a/contractcourt/chain_watcher_test.go b/contractcourt/chain_watcher_test.go
index 2781170f0c..f38cadb601 100644
--- a/contractcourt/chain_watcher_test.go
+++ b/contractcourt/chain_watcher_test.go
@@ -9,10 +9,11 @@ import (
"time"
"github.com/btcsuite/btcd/wire"
+ "github.com/lightningnetwork/lnd/chainio"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/input"
- "github.com/lightningnetwork/lnd/lntest/mock"
+ lnmock "github.com/lightningnetwork/lnd/lntest/mock"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/require"
@@ -33,8 +34,8 @@ func TestChainWatcherRemoteUnilateralClose(t *testing.T) {
// With the channels created, we'll now create a chain watcher instance
// which will be watching for any closes of Alice's channel.
- aliceNotifier := &mock.ChainNotifier{
- SpendChan: make(chan *chainntnfs.SpendDetail),
+ aliceNotifier := &lnmock.ChainNotifier{
+ SpendChan: make(chan *chainntnfs.SpendDetail, 1),
EpochChan: make(chan *chainntnfs.BlockEpoch),
ConfChan: make(chan *chainntnfs.TxConfirmation),
}
@@ -49,6 +50,20 @@ func TestChainWatcherRemoteUnilateralClose(t *testing.T) {
require.NoError(t, err, "unable to start chain watcher")
defer aliceChainWatcher.Stop()
+ // Create a mock blockbeat and send it to Alice's BlockbeatChan.
+ mockBeat := &chainio.MockBlockbeat{}
+
+ // Mock the logger. We don't care how many times it's called as it's
+ // not critical.
+ mockBeat.On("logger").Return(log)
+
+ // Mock a fake block height - this is called based on the debuglevel.
+ mockBeat.On("Height").Return(int32(1)).Maybe()
+
+ // Mock `NotifyBlockProcessed` to be call once.
+ mockBeat.On("NotifyBlockProcessed",
+ nil, aliceChainWatcher.quit).Return().Once()
+
// We'll request a new channel event subscription from Alice's chain
// watcher.
chanEvents := aliceChainWatcher.SubscribeChannelEvents()
@@ -61,7 +76,19 @@ func TestChainWatcherRemoteUnilateralClose(t *testing.T) {
SpenderTxHash: &bobTxHash,
SpendingTx: bobCommit,
}
- aliceNotifier.SpendChan <- bobSpend
+
+ // Here we mock the behavior of a restart.
+ select {
+ case aliceNotifier.SpendChan <- bobSpend:
+ case <-time.After(1 * time.Second):
+ t.Fatalf("unable to send spend details")
+ }
+
+ select {
+ case aliceChainWatcher.BlockbeatChan <- mockBeat:
+ case <-time.After(time.Second * 1):
+ t.Fatalf("unable to send blockbeat")
+ }
// We should get a new spend event over the remote unilateral close
// event channel.
@@ -117,7 +144,7 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) {
// With the channels created, we'll now create a chain watcher instance
// which will be watching for any closes of Alice's channel.
- aliceNotifier := &mock.ChainNotifier{
+ aliceNotifier := &lnmock.ChainNotifier{
SpendChan: make(chan *chainntnfs.SpendDetail),
EpochChan: make(chan *chainntnfs.BlockEpoch),
ConfChan: make(chan *chainntnfs.TxConfirmation),
@@ -165,7 +192,32 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) {
SpenderTxHash: &bobTxHash,
SpendingTx: bobCommit,
}
- aliceNotifier.SpendChan <- bobSpend
+
+ // Create a mock blockbeat and send it to Alice's BlockbeatChan.
+ mockBeat := &chainio.MockBlockbeat{}
+
+ // Mock the logger. We don't care how many times it's called as it's
+ // not critical.
+ mockBeat.On("logger").Return(log)
+
+ // Mock a fake block height - this is called based on the debuglevel.
+ mockBeat.On("Height").Return(int32(1)).Maybe()
+
+ // Mock `NotifyBlockProcessed` to be call once.
+ mockBeat.On("NotifyBlockProcessed",
+ nil, aliceChainWatcher.quit).Return().Once()
+
+ select {
+ case aliceNotifier.SpendChan <- bobSpend:
+ case <-time.After(1 * time.Second):
+ t.Fatalf("unable to send spend details")
+ }
+
+ select {
+ case aliceChainWatcher.BlockbeatChan <- mockBeat:
+ case <-time.After(time.Second * 1):
+ t.Fatalf("unable to send blockbeat")
+ }
// We should get a new spend event over the remote unilateral close
// event channel.
@@ -279,7 +331,7 @@ func TestChainWatcherDataLossProtect(t *testing.T) {
// With the channels created, we'll now create a chain watcher
// instance which will be watching for any closes of Alice's
// channel.
- aliceNotifier := &mock.ChainNotifier{
+ aliceNotifier := &lnmock.ChainNotifier{
SpendChan: make(chan *chainntnfs.SpendDetail),
EpochChan: make(chan *chainntnfs.BlockEpoch),
ConfChan: make(chan *chainntnfs.TxConfirmation),
@@ -326,7 +378,34 @@ func TestChainWatcherDataLossProtect(t *testing.T) {
SpenderTxHash: &bobTxHash,
SpendingTx: bobCommit,
}
- aliceNotifier.SpendChan <- bobSpend
+
+ // Create a mock blockbeat and send it to Alice's
+ // BlockbeatChan.
+ mockBeat := &chainio.MockBlockbeat{}
+
+ // Mock the logger. We don't care how many times it's called as
+ // it's not critical.
+ mockBeat.On("logger").Return(log)
+
+ // Mock a fake block height - this is called based on the
+ // debuglevel.
+ mockBeat.On("Height").Return(int32(1)).Maybe()
+
+ // Mock `NotifyBlockProcessed` to be call once.
+ mockBeat.On("NotifyBlockProcessed",
+ nil, aliceChainWatcher.quit).Return().Once()
+
+ select {
+ case aliceNotifier.SpendChan <- bobSpend:
+ case <-time.After(time.Second * 1):
+ t.Fatalf("failed to send spend notification")
+ }
+
+ select {
+ case aliceChainWatcher.BlockbeatChan <- mockBeat:
+ case <-time.After(time.Second * 1):
+ t.Fatalf("unable to send blockbeat")
+ }
// We should get a new uni close resolution that indicates we
// processed the DLP scenario.
@@ -453,7 +532,7 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
// With the channels created, we'll now create a chain watcher
// instance which will be watching for any closes of Alice's
// channel.
- aliceNotifier := &mock.ChainNotifier{
+ aliceNotifier := &lnmock.ChainNotifier{
SpendChan: make(chan *chainntnfs.SpendDetail),
EpochChan: make(chan *chainntnfs.BlockEpoch),
ConfChan: make(chan *chainntnfs.TxConfirmation),
@@ -497,7 +576,33 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
SpenderTxHash: &aliceTxHash,
SpendingTx: aliceCommit,
}
- aliceNotifier.SpendChan <- aliceSpend
+ // Create a mock blockbeat and send it to Alice's
+ // BlockbeatChan.
+ mockBeat := &chainio.MockBlockbeat{}
+
+ // Mock the logger. We don't care how many times it's called as
+ // it's not critical.
+ mockBeat.On("logger").Return(log)
+
+ // Mock a fake block height - this is called based on the
+ // debuglevel.
+ mockBeat.On("Height").Return(int32(1)).Maybe()
+
+ // Mock `NotifyBlockProcessed` to be call once.
+ mockBeat.On("NotifyBlockProcessed",
+ nil, aliceChainWatcher.quit).Return().Once()
+
+ select {
+ case aliceNotifier.SpendChan <- aliceSpend:
+ case <-time.After(time.Second * 1):
+ t.Fatalf("unable to send spend notification")
+ }
+
+ select {
+ case aliceChainWatcher.BlockbeatChan <- mockBeat:
+ case <-time.After(time.Second * 1):
+ t.Fatalf("unable to send blockbeat")
+ }
// We should get a local force close event from Alice as she
// should be able to detect the close based on the commitment
diff --git a/contractcourt/channel_arbitrator.go b/contractcourt/channel_arbitrator.go
index cc1ee69589..a5f75413ca 100644
--- a/contractcourt/channel_arbitrator.go
+++ b/contractcourt/channel_arbitrator.go
@@ -14,6 +14,7 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
+ "github.com/lightningnetwork/lnd/chainio"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/models"
"github.com/lightningnetwork/lnd/fn"
@@ -330,6 +331,10 @@ type ChannelArbitrator struct {
started int32 // To be used atomically.
stopped int32 // To be used atomically.
+ // Embed the blockbeat consumer struct to get access to the method
+ // `NotifyBlockProcessed` and the `BlockbeatChan`.
+ chainio.BeatConsumer
+
// startTimestamp is the time when this ChannelArbitrator was started.
startTimestamp time.Time
@@ -352,11 +357,6 @@ type ChannelArbitrator struct {
// to do its duty.
cfg ChannelArbitratorConfig
- // blocks is a channel that the arbitrator will receive new blocks on.
- // This channel should be buffered by so that it does not block the
- // sender.
- blocks chan int32
-
// signalUpdates is a channel that any new live signals for the channel
// we're watching over will be sent.
signalUpdates chan *signalUpdateMsg
@@ -404,9 +404,8 @@ func NewChannelArbitrator(cfg ChannelArbitratorConfig,
unmerged[RemotePendingHtlcSet] = htlcSets[RemotePendingHtlcSet]
}
- return &ChannelArbitrator{
+ c := &ChannelArbitrator{
log: log,
- blocks: make(chan int32, arbitratorBlockBufferSize),
signalUpdates: make(chan *signalUpdateMsg),
resolutionSignal: make(chan struct{}),
forceCloseReqs: make(chan *forceCloseReq),
@@ -415,8 +414,16 @@ func NewChannelArbitrator(cfg ChannelArbitratorConfig,
cfg: cfg,
quit: make(chan struct{}),
}
+
+ // Mount the block consumer.
+ c.BeatConsumer = chainio.NewBeatConsumer(c.quit, c.Name())
+
+ return c
}
+// Compile-time check for the chainio.Consumer interface.
+var _ chainio.Consumer = (*ChannelArbitrator)(nil)
+
// chanArbStartState contains the information from disk that we need to start
// up a channel arbitrator.
type chanArbStartState struct {
@@ -455,7 +462,9 @@ func (c *ChannelArbitrator) getStartState(tx kvdb.RTx) (*chanArbStartState,
// Start starts all the goroutines that the ChannelArbitrator needs to operate.
// If takes a start state, which will be looked up on disk if it is not
// provided.
-func (c *ChannelArbitrator) Start(state *chanArbStartState) error {
+func (c *ChannelArbitrator) Start(state *chanArbStartState,
+ beat chainio.Blockbeat) error {
+
if !atomic.CompareAndSwapInt32(&c.started, 0, 1) {
return nil
}
@@ -470,17 +479,15 @@ func (c *ChannelArbitrator) Start(state *chanArbStartState) error {
}
}
- log.Debugf("Starting ChannelArbitrator(%v), htlc_set=%v, state=%v",
- c.cfg.ChanPoint, lnutils.SpewLogClosure(c.activeHTLCs),
+ log.Tracef("Starting ChannelArbitrator(%v), htlc_set=%v, state=%v",
+ c.id(), lnutils.SpewLogClosure(c.activeHTLCs),
state.currentState)
// Set our state from our starting state.
c.state = state.currentState
- _, bestHeight, err := c.cfg.ChainIO.GetBestBlock()
- if err != nil {
- return err
- }
+ // Get the starting height.
+ bestHeight := beat.Height()
// If the channel has been marked pending close in the database, and we
// haven't transitioned the state machine to StateContractClosed (or a
@@ -512,16 +519,14 @@ func (c *ChannelArbitrator) Start(state *chanArbStartState) error {
}
log.Warnf("ChannelArbitrator(%v): detected stalled "+
- "state=%v for closed channel",
- c.cfg.ChanPoint, c.state)
+ "state=%v for closed channel", c.id(), c.state)
}
triggerHeight = c.cfg.ClosingHeight
}
log.Infof("ChannelArbitrator(%v): starting state=%v, trigger=%v, "+
- "triggerHeight=%v", c.cfg.ChanPoint, c.state, trigger,
- triggerHeight)
+ "triggerHeight=%v", c.id(), c.state, trigger, triggerHeight)
// We'll now attempt to advance our state forward based on the current
// on-chain state, and our set of active contracts.
@@ -540,8 +545,8 @@ func (c *ChannelArbitrator) Start(state *chanArbStartState) error {
fallthrough
case errNoResolutions:
log.Warnf("ChannelArbitrator(%v): detected closed"+
- "channel with no contract resolutions written.",
- c.cfg.ChanPoint)
+ "channel with no contract resolutions written",
+ c.id())
default:
return err
@@ -730,14 +735,14 @@ func (c *ChannelArbitrator) relaunchResolvers(commitSet *CommitSet,
fallthrough
case err == channeldb.ErrChannelNotFound:
log.Warnf("ChannelArbitrator(%v): unable to fetch historical "+
- "state", c.cfg.ChanPoint)
+ "state", c.id())
case err != nil:
return err
}
log.Infof("ChannelArbitrator(%v): relaunching %v contract "+
- "resolvers", c.cfg.ChanPoint, len(unresolvedContracts))
+ "resolvers", c.id(), len(unresolvedContracts))
for i := range unresolvedContracts {
resolver := unresolvedContracts[i]
@@ -797,18 +802,15 @@ func (c *ChannelArbitrator) relaunchResolvers(commitSet *CommitSet,
// TODO(roasbeef): this isn't re-launched?
}
- c.launchResolvers(unresolvedContracts, true)
+ c.resolveContracts(unresolvedContracts)
return nil
}
// Report returns htlc reports for the active resolvers.
func (c *ChannelArbitrator) Report() []*ContractReport {
- c.activeResolversLock.RLock()
- defer c.activeResolversLock.RUnlock()
-
var reports []*ContractReport
- for _, resolver := range c.activeResolvers {
+ for _, resolver := range c.resolvers() {
r, ok := resolver.(reportingContractResolver)
if !ok {
continue
@@ -831,7 +833,7 @@ func (c *ChannelArbitrator) Stop() error {
return nil
}
- log.Debugf("Stopping ChannelArbitrator(%v)", c.cfg.ChanPoint)
+ log.Debugf("Stopping ChannelArbitrator(%v)", c.id())
if c.cfg.ChainEvents.Cancel != nil {
go c.cfg.ChainEvents.Cancel()
@@ -929,8 +931,7 @@ func (c *ChannelArbitrator) stateStep(
// to see if while we were down, conditions have changed.
case StateDefault:
log.Debugf("ChannelArbitrator(%v): new block (height=%v) "+
- "examining active HTLC's", c.cfg.ChanPoint,
- triggerHeight)
+ "examining active HTLC's", c.id(), triggerHeight)
// As a new block has been connected to the end of the main
// chain, we'll check to see if we need to make any on-chain
@@ -975,7 +976,7 @@ func (c *ChannelArbitrator) stateStep(
// checking due to a chain update), then we'll exit now.
if len(chainActions) == 0 && trigger == chainTrigger {
log.Debugf("ChannelArbitrator(%v): no actions for "+
- "chain trigger, terminating", c.cfg.ChanPoint)
+ "chain trigger, terminating", c.id())
return StateDefault, closeTx, nil
}
@@ -983,7 +984,7 @@ func (c *ChannelArbitrator) stateStep(
// Otherwise, we'll log that we checked the HTLC actions as the
// commitment transaction has already been broadcast.
log.Tracef("ChannelArbitrator(%v): logging chain_actions=%v",
- c.cfg.ChanPoint, lnutils.SpewLogClosure(chainActions))
+ c.id(), lnutils.SpewLogClosure(chainActions))
// Cancel upstream HTLCs for all outgoing dust HTLCs available
// either on the local or the remote/remote pending commitment
@@ -1030,7 +1031,7 @@ func (c *ChannelArbitrator) stateStep(
case localCloseTrigger:
log.Errorf("ChannelArbitrator(%v): unexpected local "+
"commitment confirmed while in StateDefault",
- c.cfg.ChanPoint)
+ c.id())
fallthrough
case remoteCloseTrigger:
nextState = StateContractClosed
@@ -1061,7 +1062,8 @@ func (c *ChannelArbitrator) stateStep(
log.Infof("ChannelArbitrator(%v): detected %s "+
"close after closing channel, fast-forwarding "+
"to %s to resolve contract",
- c.cfg.ChanPoint, trigger, StateContractClosed)
+ c.id(), trigger, StateContractClosed)
+
return StateContractClosed, closeTx, nil
case breachCloseTrigger:
@@ -1069,13 +1071,13 @@ func (c *ChannelArbitrator) stateStep(
if nextContractState == StateError {
log.Infof("ChannelArbitrator(%v): unable to "+
"advance breach close resolution: %v",
- c.cfg.ChanPoint, nextContractState)
+ c.id(), nextContractState)
return StateError, closeTx, err
}
log.Infof("ChannelArbitrator(%v): detected %s close "+
"after closing channel, fast-forwarding to %s"+
- " to resolve contract", c.cfg.ChanPoint,
+ " to resolve contract", c.id(),
trigger, nextContractState)
return nextContractState, closeTx, nil
@@ -1084,12 +1086,12 @@ func (c *ChannelArbitrator) stateStep(
log.Infof("ChannelArbitrator(%v): detected %s "+
"close after closing channel, fast-forwarding "+
"to %s to resolve contract",
- c.cfg.ChanPoint, trigger, StateFullyResolved)
+ c.id(), trigger, StateFullyResolved)
+
return StateFullyResolved, closeTx, nil
}
- log.Infof("ChannelArbitrator(%v): force closing "+
- "chan", c.cfg.ChanPoint)
+ log.Infof("ChannelArbitrator(%v): force closing chan", c.id())
// Now that we have all the actions decided for the set of
// HTLC's, we'll broadcast the commitment transaction, and
@@ -1101,7 +1103,7 @@ func (c *ChannelArbitrator) stateStep(
closeSummary, err := c.cfg.Channel.ForceCloseChan()
if err != nil {
log.Errorf("ChannelArbitrator(%v): unable to "+
- "force close: %v", c.cfg.ChanPoint, err)
+ "force close: %v", c.id(), err)
// We tried to force close (HTLC may be expiring from
// our PoV, etc), but we think we've lost data. In this
@@ -1111,7 +1113,7 @@ func (c *ChannelArbitrator) stateStep(
log.Error("ChannelArbitrator(%v): broadcast "+
"failed due to local data loss, "+
"waiting for on chain confimation...",
- c.cfg.ChanPoint)
+ c.id())
return StateBroadcastCommit, nil, nil
}
@@ -1127,8 +1129,8 @@ func (c *ChannelArbitrator) stateStep(
err = c.cfg.MarkCommitmentBroadcasted(closeTx, lntypes.Local)
if err != nil {
log.Errorf("ChannelArbitrator(%v): unable to "+
- "mark commitment broadcasted: %v",
- c.cfg.ChanPoint, err)
+ "mark commitment broadcasted: %v", c.id(), err)
+
return StateError, closeTx, err
}
@@ -1146,7 +1148,7 @@ func (c *ChannelArbitrator) stateStep(
)
if err := c.cfg.PublishTx(closeTx, label); err != nil {
log.Errorf("ChannelArbitrator(%v): unable to broadcast "+
- "close tx: %v", c.cfg.ChanPoint, err)
+ "close tx: %v", c.id(), err)
// This makes sure we don't fail at startup if the
// commitment transaction has too low fees to make it
@@ -1217,7 +1219,7 @@ func (c *ChannelArbitrator) stateStep(
}
log.Infof("ChannelArbitrator(%v): trigger %v moving from "+
- "state %v to %v", c.cfg.ChanPoint, trigger, c.state,
+ "state %v to %v", c.id(), trigger, c.state,
nextState)
// If we're in this state, then the contract has been fully closed to
@@ -1238,8 +1240,8 @@ func (c *ChannelArbitrator) stateStep(
// resolvers, and can go straight to our final state.
if contractResolutions.IsEmpty() && confCommitSet.IsEmpty() {
log.Infof("ChannelArbitrator(%v): contract "+
- "resolutions empty, marking channel as fully resolved!",
- c.cfg.ChanPoint)
+ "resolutions empty, marking channel as fully "+
+ "resolved!", c.id())
nextState = StateFullyResolved
break
}
@@ -1322,12 +1324,13 @@ func (c *ChannelArbitrator) stateStep(
)
if err != nil {
log.Errorf("ChannelArbitrator(%v): unable to "+
- "resolve contracts: %v", c.cfg.ChanPoint, err)
+ "resolve contracts: %v", c.id(), err)
+
return StateError, closeTx, err
}
log.Debugf("ChannelArbitrator(%v): inserting %v contract "+
- "resolvers", c.cfg.ChanPoint, len(resolvers))
+ "resolvers", c.id(), len(resolvers))
err = c.log.InsertUnresolvedContracts(nil, resolvers...)
if err != nil {
@@ -1336,7 +1339,7 @@ func (c *ChannelArbitrator) stateStep(
// Finally, we'll launch all the required contract resolvers.
// Once they're all resolved, we're no longer needed.
- c.launchResolvers(resolvers, false)
+ c.resolveContracts(resolvers)
nextState = StateWaitingFullResolution
@@ -1344,7 +1347,7 @@ func (c *ChannelArbitrator) stateStep(
// all contracts are fully resolved.
case StateWaitingFullResolution:
log.Infof("ChannelArbitrator(%v): still awaiting contract "+
- "resolution", c.cfg.ChanPoint)
+ "resolution", c.id())
unresolved, err := c.log.FetchUnresolvedContracts()
if err != nil {
@@ -1365,7 +1368,7 @@ func (c *ChannelArbitrator) stateStep(
// Add debug logs.
for _, r := range unresolved {
log.Debugf("ChannelArbitrator(%v): still have "+
- "unresolved contract: %T", c.cfg.ChanPoint, r)
+ "unresolved contract: %T", c.id(), r)
}
// If we start as fully resolved, then we'll end as fully resolved.
@@ -1384,8 +1387,7 @@ func (c *ChannelArbitrator) stateStep(
}
}
- log.Tracef("ChannelArbitrator(%v): next_state=%v", c.cfg.ChanPoint,
- nextState)
+ log.Tracef("ChannelArbitrator(%v): next_state=%v", c.id(), nextState)
return nextState, closeTx, nil
}
@@ -1449,8 +1451,7 @@ func (c *ChannelArbitrator) findCommitmentDeadlineAndValue(heightHint uint32,
// Skip if the HTLC is dust.
if htlc.OutputIndex < 0 {
log.Debugf("ChannelArbitrator(%v): skipped deadline "+
- "for dust htlc=%x",
- c.cfg.ChanPoint, htlc.RHash[:])
+ "for dust htlc=%x", c.id(), htlc.RHash[:])
continue
}
@@ -1476,7 +1477,7 @@ func (c *ChannelArbitrator) findCommitmentDeadlineAndValue(heightHint uint32,
deadlineMinHeight = deadline
log.Tracef("ChannelArbitrator(%v): outgoing HTLC has "+
- "deadline=%v, value=%v", c.cfg.ChanPoint,
+ "deadline=%v, value=%v", c.id(),
deadlineMinHeight, value)
}
}
@@ -1487,8 +1488,7 @@ func (c *ChannelArbitrator) findCommitmentDeadlineAndValue(heightHint uint32,
// Skip if the HTLC is dust.
if htlc.OutputIndex < 0 {
log.Debugf("ChannelArbitrator(%v): skipped deadline "+
- "for dust htlc=%x",
- c.cfg.ChanPoint, htlc.RHash[:])
+ "for dust htlc=%x", c.id(), htlc.RHash[:])
continue
}
@@ -1511,7 +1511,7 @@ func (c *ChannelArbitrator) findCommitmentDeadlineAndValue(heightHint uint32,
deadlineMinHeight = htlc.RefundTimeout
log.Tracef("ChannelArbitrator(%v): incoming HTLC has "+
- "deadline=%v, amt=%v", c.cfg.ChanPoint,
+ "deadline=%v, amt=%v", c.id(),
deadlineMinHeight, value)
}
}
@@ -1535,7 +1535,7 @@ func (c *ChannelArbitrator) findCommitmentDeadlineAndValue(heightHint uint32,
case deadlineMinHeight <= heightHint:
log.Warnf("ChannelArbitrator(%v): deadline is passed with "+
"deadlineMinHeight=%d, heightHint=%d",
- c.cfg.ChanPoint, deadlineMinHeight, heightHint)
+ c.id(), deadlineMinHeight, heightHint)
deadline = 1
// Use half of the deadline delta, and leave the other half to be used
@@ -1553,23 +1553,75 @@ func (c *ChannelArbitrator) findCommitmentDeadlineAndValue(heightHint uint32,
log.Debugf("ChannelArbitrator(%v): calculated valueLeft=%v, "+
"deadline=%d, using deadlineMinHeight=%d, heightHint=%d",
- c.cfg.ChanPoint, valueLeft, deadline, deadlineMinHeight,
- heightHint)
+ c.id(), valueLeft, deadline, deadlineMinHeight, heightHint)
return fn.Some(int32(deadline)), valueLeft, nil
}
-// launchResolvers updates the activeResolvers list and starts the resolvers.
-func (c *ChannelArbitrator) launchResolvers(resolvers []ContractResolver,
- immediate bool) {
-
+// resolveContracts updates the activeResolvers list and starts to resolve each
+// contract concurrently, and launches them.
+func (c *ChannelArbitrator) resolveContracts(resolvers []ContractResolver) {
+ // Update the active contract resolvers.
c.activeResolversLock.Lock()
- defer c.activeResolversLock.Unlock()
-
c.activeResolvers = resolvers
- for _, contract := range resolvers {
+ c.activeResolversLock.Unlock()
+
+ // Launch all resolvers.
+ c.launchResolvers()
+
+ for _, contract := range c.resolvers() {
c.wg.Add(1)
- go c.resolveContract(contract, immediate)
+ go c.resolveContract(contract)
+ }
+}
+
+// launchResolvers launches all the active resolvers concurrently.
+func (c *ChannelArbitrator) launchResolvers() {
+ resolvers := c.resolvers()
+
+ // errChans is a map of channels that will be used to receive errors
+ // returned from launching the resolvers.
+ errChans := make(map[ContractResolver]chan error, len(resolvers))
+
+ // Launch each resolver in goroutines.
+ for _, r := range resolvers {
+ // If the contract is already resolved, there's no need to
+ // launch it again.
+ if r.IsResolved() {
+ log.Debugf("ChannelArbitrator(%v): skipping resolver "+
+ "%T as it's already resolved", c.id(), r)
+
+ continue
+ }
+
+ // Create a signal chan.
+ errChan := make(chan error, 1)
+ errChans[r] = errChan
+
+ go func() {
+ err := r.Launch()
+ errChan <- err
+ }()
+
+ }
+
+ // Wait for all resolvers to finish launching.
+ for r, errChan := range errChans {
+ select {
+ case err := <-errChan:
+ if err == nil {
+ continue
+ }
+
+ log.Errorf("ChannelArbitrator(%v): unable to launch "+
+ "contract resolver(%T): %v", c.id(), r, err)
+
+ case <-c.quit:
+ log.Debugf("ChannelArbitrator quit signal received, " +
+ "exit launchResolvers")
+
+ return
+ }
}
}
@@ -1593,15 +1645,16 @@ func (c *ChannelArbitrator) advanceState(
for {
priorState = c.state
log.Debugf("ChannelArbitrator(%v): attempting state step with "+
- "trigger=%v from state=%v", c.cfg.ChanPoint, trigger,
- priorState)
+ "trigger=%v from state=%v at height=%v",
+ c.id(), trigger, priorState, triggerHeight)
nextState, closeTx, err := c.stateStep(
triggerHeight, trigger, confCommitSet,
)
if err != nil {
log.Errorf("ChannelArbitrator(%v): unable to advance "+
- "state: %v", c.cfg.ChanPoint, err)
+ "state: %v", c.id(), err)
+
return priorState, nil, err
}
@@ -1614,7 +1667,8 @@ func (c *ChannelArbitrator) advanceState(
// terminate.
if nextState == priorState {
log.Debugf("ChannelArbitrator(%v): terminating at "+
- "state=%v", c.cfg.ChanPoint, nextState)
+ "state=%v", c.id(), nextState)
+
return nextState, forceCloseTx, nil
}
@@ -1623,8 +1677,8 @@ func (c *ChannelArbitrator) advanceState(
// the prior state if anything fails.
if err := c.log.CommitState(nextState); err != nil {
log.Errorf("ChannelArbitrator(%v): unable to commit "+
- "next state(%v): %v", c.cfg.ChanPoint,
- nextState, err)
+ "next state(%v): %v", c.id(), nextState, err)
+
return priorState, nil, err
}
c.state = nextState
@@ -1738,8 +1792,8 @@ func (c *ChannelArbitrator) shouldGoOnChain(htlc channeldb.HTLC,
broadcastCutOff := htlc.RefundTimeout - broadcastDelta
log.Tracef("ChannelArbitrator(%v): examining outgoing contract: "+
- "expiry=%v, cutoff=%v, height=%v", c.cfg.ChanPoint, htlc.RefundTimeout,
- broadcastCutOff, currentHeight)
+ "expiry=%v, cutoff=%v, height=%v", c.id(),
+ htlc.RefundTimeout, broadcastCutOff, currentHeight)
// TODO(roasbeef): take into account default HTLC delta, don't need to
// broadcast immediately
@@ -1788,8 +1842,8 @@ func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
log.Debugf("ChannelArbitrator(%v): checking commit chain actions at "+
"height=%v, in_htlc_count=%v, out_htlc_count=%v",
- c.cfg.ChanPoint, height,
- len(htlcs.incomingHTLCs), len(htlcs.outgoingHTLCs))
+ c.id(), height, len(htlcs.incomingHTLCs),
+ len(htlcs.outgoingHTLCs))
actionMap := make(ChainActionMap)
@@ -1818,7 +1872,7 @@ func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
log.Infof("ChannelArbitrator(%v): go to chain for "+
"outgoing htlc %x: timeout=%v, amount=%v, "+
"blocks_until_expiry=%v, broadcast_delta=%v",
- c.cfg.ChanPoint, htlc.RHash[:],
+ c.id(), htlc.RHash[:],
htlc.RefundTimeout, htlc.Amt, remainingBlocks,
c.cfg.OutgoingBroadcastDelta,
)
@@ -1853,7 +1907,7 @@ func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
log.Infof("ChannelArbitrator(%v): go to chain for "+
"incoming htlc %x: timeout=%v, amount=%v, "+
"blocks_until_expiry=%v, broadcast_delta=%v",
- c.cfg.ChanPoint, htlc.RHash[:],
+ c.id(), htlc.RHash[:],
htlc.RefundTimeout, htlc.Amt, remainingBlocks,
c.cfg.IncomingBroadcastDelta,
)
@@ -1868,7 +1922,7 @@ func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
// we're *forced* to act on each HTLC.
if !haveChainActions && trigger == chainTrigger {
log.Tracef("ChannelArbitrator(%v): no actions to take at "+
- "height=%v", c.cfg.ChanPoint, height)
+ "height=%v", c.id(), height)
return actionMap, nil
}
@@ -1884,8 +1938,7 @@ func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
// negative.
case htlc.OutputIndex < 0:
log.Tracef("ChannelArbitrator(%v): immediately "+
- "failing dust htlc=%x", c.cfg.ChanPoint,
- htlc.RHash[:])
+ "failing dust htlc=%x", c.id(), htlc.RHash[:])
actionMap[HtlcFailDustAction] = append(
actionMap[HtlcFailDustAction], htlc,
@@ -1907,7 +1960,7 @@ func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
log.Tracef("ChannelArbitrator(%v): watching chain to "+
"decide action for outgoing htlc=%x",
- c.cfg.ChanPoint, htlc.RHash[:])
+ c.id(), htlc.RHash[:])
actionMap[HtlcOutgoingWatchAction] = append(
actionMap[HtlcOutgoingWatchAction], htlc,
@@ -1917,7 +1970,7 @@ func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
// to sweep this HTLC on-chain
default:
log.Tracef("ChannelArbitrator(%v): going on-chain to "+
- "timeout htlc=%x", c.cfg.ChanPoint, htlc.RHash[:])
+ "timeout htlc=%x", c.id(), htlc.RHash[:])
actionMap[HtlcTimeoutAction] = append(
actionMap[HtlcTimeoutAction], htlc,
@@ -1935,7 +1988,7 @@ func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
if htlc.OutputIndex < 0 {
log.Debugf("ChannelArbitrator(%v): no resolution "+
"needed for incoming dust htlc=%x",
- c.cfg.ChanPoint, htlc.RHash[:])
+ c.id(), htlc.RHash[:])
actionMap[HtlcIncomingDustFinalAction] = append(
actionMap[HtlcIncomingDustFinalAction], htlc,
@@ -1945,8 +1998,7 @@ func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
}
log.Tracef("ChannelArbitrator(%v): watching chain to decide "+
- "action for incoming htlc=%x", c.cfg.ChanPoint,
- htlc.RHash[:])
+ "action for incoming htlc=%x", c.id(), htlc.RHash[:])
actionMap[HtlcIncomingWatchAction] = append(
actionMap[HtlcIncomingWatchAction], htlc,
@@ -2100,8 +2152,7 @@ func (c *ChannelArbitrator) checkRemoteDanglingActions(
}
log.Infof("ChannelArbitrator(%v): fail dangling htlc=%x from "+
- "local/remote commitments diff",
- c.cfg.ChanPoint, htlc.RHash[:])
+ "local/remote commitments diff", c.id(), htlc.RHash[:])
actionMap[HtlcFailDanglingAction] = append(
actionMap[HtlcFailDanglingAction], htlc,
@@ -2183,8 +2234,7 @@ func (c *ChannelArbitrator) checkRemoteDiffActions(
if err != nil {
log.Errorf("ChannelArbitrator(%v): failed to query "+
"preimage for dangling htlc=%x from remote "+
- "commitments diff", c.cfg.ChanPoint,
- htlc.RHash[:])
+ "commitments diff", c.id(), htlc.RHash[:])
continue
}
@@ -2211,8 +2261,7 @@ func (c *ChannelArbitrator) checkRemoteDiffActions(
)
log.Infof("ChannelArbitrator(%v): fail dangling htlc=%x from "+
- "remote commitments diff",
- c.cfg.ChanPoint, htlc.RHash[:])
+ "remote commitments diff", c.id(), htlc.RHash[:])
}
return actionMap
@@ -2293,7 +2342,7 @@ func (c *ChannelArbitrator) prepContractResolutions(
fallthrough
case err == channeldb.ErrChannelNotFound:
log.Warnf("ChannelArbitrator(%v): unable to fetch historical "+
- "state", c.cfg.ChanPoint)
+ "state", c.id())
case err != nil:
return nil, err
@@ -2374,7 +2423,7 @@ func (c *ChannelArbitrator) prepContractResolutions(
// TODO(roasbeef): panic?
log.Errorf("ChannelArbitrator(%v) unable to find "+
"incoming resolution: %v",
- c.cfg.ChanPoint, htlcOp)
+ c.id(), htlcOp)
continue
}
@@ -2401,8 +2450,10 @@ func (c *ChannelArbitrator) prepContractResolutions(
resolution, ok := outResolutionMap[htlcOp]
if !ok {
- log.Errorf("ChannelArbitrator(%v) unable to find "+
- "outgoing resolution: %v", c.cfg.ChanPoint, htlcOp)
+ log.Errorf("ChannelArbitrator(%v) "+
+ "unable to find outgoing "+
+ "resolution: %v",
+ c.id(), htlcOp)
continue
}
@@ -2442,7 +2493,7 @@ func (c *ChannelArbitrator) prepContractResolutions(
if !ok {
log.Errorf("ChannelArbitrator(%v) unable to find "+
"incoming resolution: %v",
- c.cfg.ChanPoint, htlcOp)
+ c.id(), htlcOp)
continue
}
@@ -2473,7 +2524,7 @@ func (c *ChannelArbitrator) prepContractResolutions(
log.Errorf("ChannelArbitrator(%v) "+
"unable to find outgoing "+
"resolution: %v",
- c.cfg.ChanPoint, htlcOp)
+ c.id(), htlcOp)
continue
}
@@ -2541,19 +2592,17 @@ func (c *ChannelArbitrator) replaceResolver(oldResolver,
// contracts.
//
// NOTE: This MUST be run as a goroutine.
-func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver,
- immediate bool) {
-
+func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver) {
defer c.wg.Done()
log.Debugf("ChannelArbitrator(%v): attempting to resolve %T",
- c.cfg.ChanPoint, currentContract)
+ c.id(), currentContract)
// Until the contract is fully resolved, we'll continue to iteratively
// resolve the contract one step at a time.
for !currentContract.IsResolved() {
- log.Debugf("ChannelArbitrator(%v): contract %T not yet resolved",
- c.cfg.ChanPoint, currentContract)
+ log.Debugf("ChannelArbitrator(%v): contract %T not yet "+
+ "resolved", c.id(), currentContract)
select {
@@ -2564,7 +2613,7 @@ func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver,
default:
// Otherwise, we'll attempt to resolve the current
// contract.
- nextContract, err := currentContract.Resolve(immediate)
+ nextContract, err := currentContract.Resolve()
if err != nil {
if err == errResolverShuttingDown {
return
@@ -2572,7 +2621,7 @@ func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver,
log.Errorf("ChannelArbitrator(%v): unable to "+
"progress %T: %v",
- c.cfg.ChanPoint, currentContract, err)
+ c.id(), currentContract, err)
return
}
@@ -2585,7 +2634,7 @@ func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver,
case nextContract != nil:
log.Debugf("ChannelArbitrator(%v): swapping "+
"out contract %T for %T ",
- c.cfg.ChanPoint, currentContract,
+ c.id(), currentContract,
nextContract)
// Swap contract in log.
@@ -2613,12 +2662,19 @@ func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver,
// loop.
currentContract = nextContract
+ // Launch the new contract.
+ err = currentContract.Launch()
+ if err != nil {
+ log.Errorf("Failed to launch %T: %v",
+ currentContract, err)
+ }
+
// If this contract is actually fully resolved, then
// we'll mark it as such within the database.
case currentContract.IsResolved():
log.Debugf("ChannelArbitrator(%v): marking "+
"contract %T fully resolved",
- c.cfg.ChanPoint, currentContract)
+ c.id(), currentContract)
err := c.log.ResolveContract(currentContract)
if err != nil {
@@ -2684,7 +2740,7 @@ func (c *ChannelArbitrator) notifyContractUpdate(upd *ContractUpdate) {
c.unmergedSet[upd.HtlcKey] = newHtlcSet(upd.Htlcs)
log.Tracef("ChannelArbitrator(%v): fresh set of htlcs=%v",
- c.cfg.ChanPoint, lnutils.SpewLogClosure(upd))
+ c.id(), lnutils.SpewLogClosure(upd))
}
// updateActiveHTLCs merges the unmerged set of HTLCs from the link with
@@ -2729,31 +2785,21 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
// A new block has arrived, we'll examine all the active HTLC's
// to see if any of them have expired, and also update our
// track of the best current height.
- case blockHeight, ok := <-c.blocks:
- if !ok {
- return
- }
- bestHeight = blockHeight
+ case beat := <-c.BlockbeatChan:
+ bestHeight = beat.Height()
- // If we're not in the default state, then we can
- // ignore this signal as we're waiting for contract
- // resolution.
- if c.state != StateDefault {
- continue
- }
+ log.Debugf("ChannelArbitrator(%v): new block height=%v",
+ c.id(), bestHeight)
- // Now that a new block has arrived, we'll attempt to
- // advance our state forward.
- nextState, _, err := c.advanceState(
- uint32(bestHeight), chainTrigger, nil,
- )
+ err := c.handleBlockbeat(beat)
if err != nil {
- log.Errorf("Unable to advance state: %v", err)
+ log.Errorf("Handle block=%v got err: %v",
+ bestHeight, err)
}
// If as a result of this trigger, the contract is
// fully resolved, then well exit.
- if nextState == StateFullyResolved {
+ if c.state == StateFullyResolved {
return
}
@@ -2763,7 +2809,7 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
// properly do our job.
case signalUpdate := <-c.signalUpdates:
log.Tracef("ChannelArbitrator(%v): got new signal "+
- "update!", c.cfg.ChanPoint)
+ "update!", c.id())
// We'll update the ShortChannelID.
c.cfg.ShortChanID = signalUpdate.newSignals.ShortChanID
@@ -2776,237 +2822,61 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
// We've cooperatively closed the channel, so we're no longer
// needed. We'll mark the channel as resolved and exit.
case closeInfo := <-c.cfg.ChainEvents.CooperativeClosure:
- log.Infof("ChannelArbitrator(%v) marking channel "+
- "cooperatively closed", c.cfg.ChanPoint)
-
- err := c.cfg.MarkChannelClosed(
- closeInfo.ChannelCloseSummary,
- channeldb.ChanStatusCoopBroadcasted,
- )
+ err := c.handleCoopCloseEvent(closeInfo)
if err != nil {
- log.Errorf("Unable to mark channel closed: "+
- "%v", err)
- return
- }
+ log.Errorf("Failed to handle coop close: %v",
+ err)
- // We'll now advance our state machine until it reaches
- // a terminal state, and the channel is marked resolved.
- _, _, err = c.advanceState(
- closeInfo.CloseHeight, coopCloseTrigger, nil,
- )
- if err != nil {
- log.Errorf("Unable to advance state: %v", err)
return
}
// We have broadcasted our commitment, and it is now confirmed
// on-chain.
case closeInfo := <-c.cfg.ChainEvents.LocalUnilateralClosure:
- log.Infof("ChannelArbitrator(%v): local on-chain "+
- "channel close", c.cfg.ChanPoint)
-
if c.state != StateCommitmentBroadcasted {
log.Errorf("ChannelArbitrator(%v): unexpected "+
- "local on-chain channel close",
- c.cfg.ChanPoint)
+ "local on-chain channel close", c.id())
}
- closeTx := closeInfo.CloseTx
- contractRes := &ContractResolutions{
- CommitHash: closeTx.TxHash(),
- CommitResolution: closeInfo.CommitResolution,
- HtlcResolutions: *closeInfo.HtlcResolutions,
- AnchorResolution: closeInfo.AnchorResolution,
- }
-
- // When processing a unilateral close event, we'll
- // transition to the ContractClosed state. We'll log
- // out the set of resolutions such that they are
- // available to fetch in that state, we'll also write
- // the commit set so we can reconstruct our chain
- // actions on restart.
- err := c.log.LogContractResolutions(contractRes)
- if err != nil {
- log.Errorf("Unable to write resolutions: %v",
- err)
- return
- }
- err = c.log.InsertConfirmedCommitSet(
- &closeInfo.CommitSet,
- )
+ err := c.handleLocalForceCloseEvent(closeInfo)
if err != nil {
- log.Errorf("Unable to write commit set: %v",
- err)
- return
- }
+ log.Errorf("Failed to handle local force "+
+ "close: %v", err)
- // After the set of resolutions are successfully
- // logged, we can safely close the channel. After this
- // succeeds we won't be getting chain events anymore,
- // so we must make sure we can recover on restart after
- // it is marked closed. If the next state transition
- // fails, we'll start up in the prior state again, and
- // we won't be longer getting chain events. In this
- // case we must manually re-trigger the state
- // transition into StateContractClosed based on the
- // close status of the channel.
- err = c.cfg.MarkChannelClosed(
- closeInfo.ChannelCloseSummary,
- channeldb.ChanStatusLocalCloseInitiator,
- )
- if err != nil {
- log.Errorf("Unable to mark "+
- "channel closed: %v", err)
return
}
- // We'll now advance our state machine until it reaches
- // a terminal state.
- _, _, err = c.advanceState(
- uint32(closeInfo.SpendingHeight),
- localCloseTrigger, &closeInfo.CommitSet,
- )
- if err != nil {
- log.Errorf("Unable to advance state: %v", err)
- }
-
// The remote party has broadcast the commitment on-chain.
// We'll examine our state to determine if we need to act at
// all.
case uniClosure := <-c.cfg.ChainEvents.RemoteUnilateralClosure:
- log.Infof("ChannelArbitrator(%v): remote party has "+
- "closed channel out on-chain", c.cfg.ChanPoint)
-
- // If we don't have a self output, and there are no
- // active HTLC's, then we can immediately mark the
- // contract as fully resolved and exit.
- contractRes := &ContractResolutions{
- CommitHash: *uniClosure.SpenderTxHash,
- CommitResolution: uniClosure.CommitResolution,
- HtlcResolutions: *uniClosure.HtlcResolutions,
- AnchorResolution: uniClosure.AnchorResolution,
- }
-
- // When processing a unilateral close event, we'll
- // transition to the ContractClosed state. We'll log
- // out the set of resolutions such that they are
- // available to fetch in that state, we'll also write
- // the commit set so we can reconstruct our chain
- // actions on restart.
- err := c.log.LogContractResolutions(contractRes)
- if err != nil {
- log.Errorf("Unable to write resolutions: %v",
- err)
- return
- }
- err = c.log.InsertConfirmedCommitSet(
- &uniClosure.CommitSet,
- )
+ err := c.handleRemoteForceCloseEvent(uniClosure)
if err != nil {
- log.Errorf("Unable to write commit set: %v",
- err)
- return
- }
+ log.Errorf("Failed to handle remote force "+
+ "close: %v", err)
- // After the set of resolutions are successfully
- // logged, we can safely close the channel. After this
- // succeeds we won't be getting chain events anymore,
- // so we must make sure we can recover on restart after
- // it is marked closed. If the next state transition
- // fails, we'll start up in the prior state again, and
- // we won't be longer getting chain events. In this
- // case we must manually re-trigger the state
- // transition into StateContractClosed based on the
- // close status of the channel.
- closeSummary := &uniClosure.ChannelCloseSummary
- err = c.cfg.MarkChannelClosed(
- closeSummary,
- channeldb.ChanStatusRemoteCloseInitiator,
- )
- if err != nil {
- log.Errorf("Unable to mark channel closed: %v",
- err)
return
}
- // We'll now advance our state machine until it reaches
- // a terminal state.
- _, _, err = c.advanceState(
- uint32(uniClosure.SpendingHeight),
- remoteCloseTrigger, &uniClosure.CommitSet,
- )
- if err != nil {
- log.Errorf("Unable to advance state: %v", err)
- }
-
// The remote has breached the channel. As this is handled by
// the ChainWatcher and BreachArbitrator, we don't have to do
// anything in particular, so just advance our state and
// gracefully exit.
case breachInfo := <-c.cfg.ChainEvents.ContractBreach:
- log.Infof("ChannelArbitrator(%v): remote party has "+
- "breached channel!", c.cfg.ChanPoint)
-
- // In the breach case, we'll only have anchor and
- // breach resolutions.
- contractRes := &ContractResolutions{
- CommitHash: breachInfo.CommitHash,
- BreachResolution: breachInfo.BreachResolution,
- AnchorResolution: breachInfo.AnchorResolution,
- }
-
- // We'll transition to the ContractClosed state and log
- // the set of resolutions such that they can be turned
- // into resolvers later on. We'll also insert the
- // CommitSet of the latest set of commitments.
- err := c.log.LogContractResolutions(contractRes)
- if err != nil {
- log.Errorf("Unable to write resolutions: %v",
- err)
- return
- }
- err = c.log.InsertConfirmedCommitSet(
- &breachInfo.CommitSet,
- )
+ err := c.handleContractBreach(breachInfo)
if err != nil {
- log.Errorf("Unable to write commit set: %v",
- err)
- return
- }
+ log.Errorf("Failed to handle contract breach: "+
+ "%v", err)
- // The channel is finally marked pending closed here as
- // the BreachArbitrator and channel arbitrator have
- // persisted the relevant states.
- closeSummary := &breachInfo.CloseSummary
- err = c.cfg.MarkChannelClosed(
- closeSummary,
- channeldb.ChanStatusRemoteCloseInitiator,
- )
- if err != nil {
- log.Errorf("Unable to mark channel closed: %v",
- err)
return
}
- log.Infof("Breached channel=%v marked pending-closed",
- breachInfo.BreachResolution.FundingOutPoint)
-
- // We'll advance our state machine until it reaches a
- // terminal state.
- _, _, err = c.advanceState(
- uint32(bestHeight), breachCloseTrigger,
- &breachInfo.CommitSet,
- )
- if err != nil {
- log.Errorf("Unable to advance state: %v", err)
- }
-
// A new contract has just been resolved, we'll now check our
// log to see if all contracts have been resolved. If so, then
// we can exit as the contract is fully resolved.
case <-c.resolutionSignal:
log.Infof("ChannelArbitrator(%v): a contract has been "+
- "fully resolved!", c.cfg.ChanPoint)
+ "fully resolved!", c.id())
nextState, _, err := c.advanceState(
uint32(bestHeight), chainTrigger, nil,
@@ -3020,7 +2890,7 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
if nextState == StateFullyResolved {
log.Infof("ChannelArbitrator(%v): all "+
"contracts fully resolved, exiting",
- c.cfg.ChanPoint)
+ c.id())
return
}
@@ -3029,7 +2899,7 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
// channel. We'll
case closeReq := <-c.forceCloseReqs:
log.Infof("ChannelArbitrator(%v): received force "+
- "close request", c.cfg.ChanPoint)
+ "close request", c.id())
if c.state != StateDefault {
select {
@@ -3068,8 +2938,7 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
// advancing our state, then we'll exit.
if nextState == StateFullyResolved {
log.Infof("ChannelArbitrator(%v): all "+
- "contracts resolved, exiting",
- c.cfg.ChanPoint)
+ "contracts resolved, exiting", c.id())
return
}
@@ -3079,6 +2948,94 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
}
}
+// handleBlockbeat processes a newly received blockbeat by advancing the
+// arbitrator's internal state using the received block height.
+func (c *ChannelArbitrator) handleBlockbeat(beat chainio.Blockbeat) error {
+ // Notify we've processed the block.
+ defer c.NotifyBlockProcessed(beat, nil)
+
+ // Perform a non-blocking read on the close events in case the channel
+ // is closed in this blockbeat.
+ c.receiveAndProcessCloseEvent()
+
+ // Try to advance the state if we are in StateDefault.
+ if c.state == StateDefault {
+ // Now that a new block has arrived, we'll attempt to advance
+ // our state forward.
+ _, _, err := c.advanceState(
+ uint32(beat.Height()), chainTrigger, nil,
+ )
+ if err != nil {
+ return fmt.Errorf("unable to advance state: %w", err)
+ }
+ }
+
+ // Launch all active resolvers when a new blockbeat is received.
+ c.launchResolvers()
+
+ return nil
+}
+
+// receiveAndProcessCloseEvent does a non-blocking read on all the channel
+// close event channels. If an event is received, it will be further processed.
+func (c *ChannelArbitrator) receiveAndProcessCloseEvent() {
+ select {
+ // Received a coop close event, we now mark the channel as resolved and
+ // exit.
+ case closeInfo := <-c.cfg.ChainEvents.CooperativeClosure:
+ err := c.handleCoopCloseEvent(closeInfo)
+ if err != nil {
+ log.Errorf("Failed to handle coop close: %v", err)
+ return
+ }
+
+ // We have broadcast our commitment, and it is now confirmed onchain.
+ case closeInfo := <-c.cfg.ChainEvents.LocalUnilateralClosure:
+ if c.state != StateCommitmentBroadcasted {
+ log.Errorf("ChannelArbitrator(%v): unexpected "+
+ "local on-chain channel close", c.id())
+ }
+
+ err := c.handleLocalForceCloseEvent(closeInfo)
+ if err != nil {
+ log.Errorf("Failed to handle local force close: %v",
+ err)
+
+ return
+ }
+
+ // The remote party has broadcast the commitment. We'll examine our
+ // state to determine if we need to act at all.
+ case uniClosure := <-c.cfg.ChainEvents.RemoteUnilateralClosure:
+ err := c.handleRemoteForceCloseEvent(uniClosure)
+ if err != nil {
+ log.Errorf("Failed to handle remote force close: %v",
+ err)
+
+ return
+ }
+
+ // The remote has breached the channel! We now launch the
+ // breach contract resolvers.
+ case breachInfo := <-c.cfg.ChainEvents.ContractBreach:
+ err := c.handleContractBreach(breachInfo)
+ if err != nil {
+ log.Errorf("Failed to handle contract breach: %v", err)
+ return
+ }
+
+ default:
+ log.Infof("ChannelArbitrator(%v) no close event", c.id())
+ }
+}
+
+// Name returns a human-readable string for this subsystem.
+//
+// NOTE: Part of chainio.Consumer interface.
+func (c *ChannelArbitrator) Name() string {
+ return fmt.Sprintf("ChannelArbitrator(%v)", c.id())
+}
+
// checkLegacyBreach returns StateFullyResolved if the channel was closed with
// a breach transaction before the channel arbitrator launched its own breach
// resolver. StateContractClosed is returned if this is a modern breach close
@@ -3364,3 +3321,232 @@ func (c *ChannelArbitrator) abandonForwards(htlcs fn.Set[uint64]) error {
return nil
}
+
+// resolvers returns a copy of the active resolvers.
+func (c *ChannelArbitrator) resolvers() []ContractResolver {
+ c.activeResolversLock.Lock()
+ defer c.activeResolversLock.Unlock()
+
+ resolvers := make([]ContractResolver, 0, len(c.activeResolvers))
+ resolvers = append(resolvers, c.activeResolvers...)
+
+ return resolvers
+}
+
+// id returns an identifier for the channel arbitrator to be used in logging.
+// It uses the ShortChanID as the id when it's not zero, otherwise ChanPoint is
+// used.
+func (c *ChannelArbitrator) id() string {
+ id := c.cfg.ShortChanID.String()
+
+ if c.cfg.ShortChanID.IsDefault() {
+ id = c.cfg.ChanPoint.String()
+ }
+
+ return id
+}
+
+// handleCoopCloseEvent takes a coop close event from ChainEvents, marks the
+// channel as closed and advances the state.
+func (c *ChannelArbitrator) handleCoopCloseEvent(
+ closeInfo *CooperativeCloseInfo) error {
+
+ log.Infof("ChannelArbitrator(%v) marking channel cooperatively closed "+
+ "at height %v", c.id(), closeInfo.CloseHeight)
+
+ err := c.cfg.MarkChannelClosed(
+ closeInfo.ChannelCloseSummary,
+ channeldb.ChanStatusCoopBroadcasted,
+ )
+ if err != nil {
+ return fmt.Errorf("unable to mark channel closed: %w", err)
+ }
+
+ // We'll now advance our state machine until it reaches a terminal
+ // state, and the channel is marked resolved.
+ _, _, err = c.advanceState(closeInfo.CloseHeight, coopCloseTrigger, nil)
+ if err != nil {
+ log.Errorf("Unable to advance state: %v", err)
+ }
+
+ return nil
+}
+
+// handleLocalForceCloseEvent takes a local force close event from ChainEvents,
+// saves the contract resolutions to disk, mark the channel as closed and
+// advance the state.
+func (c *ChannelArbitrator) handleLocalForceCloseEvent(
+ closeInfo *LocalUnilateralCloseInfo) error {
+
+ closeTx := closeInfo.CloseTx
+
+ log.Infof("ChannelArbitrator(%v): local force close tx=%v confirmed",
+ c.id(), closeTx.TxHash())
+
+ contractRes := &ContractResolutions{
+ CommitHash: closeTx.TxHash(),
+ CommitResolution: closeInfo.CommitResolution,
+ HtlcResolutions: *closeInfo.HtlcResolutions,
+ AnchorResolution: closeInfo.AnchorResolution,
+ }
+
+ // When processing a unilateral close event, we'll transition to the
+ // ContractClosed state. We'll log out the set of resolutions such that
+ // they are available to fetch in that state, we'll also write the
+ // commit set so we can reconstruct our chain actions on restart.
+ err := c.log.LogContractResolutions(contractRes)
+ if err != nil {
+ return fmt.Errorf("unable to write resolutions: %w", err)
+ }
+
+ err = c.log.InsertConfirmedCommitSet(&closeInfo.CommitSet)
+ if err != nil {
+ return fmt.Errorf("unable to write commit set: %w", err)
+ }
+
+ // After the set of resolutions are successfully logged, we can safely
+ // close the channel. After this succeeds we won't be getting chain
+ // events anymore, so we must make sure we can recover on restart after
+ // it is marked closed. If the next state transition fails, we'll start
+ // up in the prior state again, and we won't be longer getting chain
+ // events. In this case we must manually re-trigger the state
+ // transition into StateContractClosed based on the close status of the
+ // channel.
+ err = c.cfg.MarkChannelClosed(
+ closeInfo.ChannelCloseSummary,
+ channeldb.ChanStatusLocalCloseInitiator,
+ )
+ if err != nil {
+ return fmt.Errorf("unable to mark channel closed: %w", err)
+ }
+
+ // We'll now advance our state machine until it reaches a terminal
+ // state.
+ _, _, err = c.advanceState(
+ uint32(closeInfo.SpendingHeight),
+ localCloseTrigger, &closeInfo.CommitSet,
+ )
+ if err != nil {
+ log.Errorf("Unable to advance state: %v", err)
+ }
+
+ return nil
+}
+
+// handleRemoteForceCloseEvent takes a remote force close event from
+// ChainEvents, saves the contract resolutions to disk, mark the channel as
+// closed and advance the state.
+func (c *ChannelArbitrator) handleRemoteForceCloseEvent(
+ closeInfo *RemoteUnilateralCloseInfo) error {
+
+ log.Infof("ChannelArbitrator(%v): remote party has force closed "+
+ "channel at height %v", c.id(), closeInfo.SpendingHeight)
+
+ // If we don't have a self output, and there are no active HTLC's, then
+ // we can immediately mark the contract as fully resolved and exit.
+ contractRes := &ContractResolutions{
+ CommitHash: *closeInfo.SpenderTxHash,
+ CommitResolution: closeInfo.CommitResolution,
+ HtlcResolutions: *closeInfo.HtlcResolutions,
+ AnchorResolution: closeInfo.AnchorResolution,
+ }
+
+ // When processing a unilateral close event, we'll transition to the
+ // ContractClosed state. We'll log out the set of resolutions such that
+ // they are available to fetch in that state, we'll also write the
+ // commit set so we can reconstruct our chain actions on restart.
+ err := c.log.LogContractResolutions(contractRes)
+ if err != nil {
+ return fmt.Errorf("unable to write resolutions: %w", err)
+ }
+
+ err = c.log.InsertConfirmedCommitSet(&closeInfo.CommitSet)
+ if err != nil {
+ return fmt.Errorf("unable to write commit set: %w", err)
+ }
+
+ // After the set of resolutions are successfully logged, we can safely
+ // close the channel. After this succeeds we won't be getting chain
+ // events anymore, so we must make sure we can recover on restart after
+ // it is marked closed. If the next state transition fails, we'll start
+ // up in the prior state again, and we won't be longer getting chain
+ // events. In this case we must manually re-trigger the state
+ // transition into StateContractClosed based on the close status of the
+ // channel.
+ closeSummary := &closeInfo.ChannelCloseSummary
+ err = c.cfg.MarkChannelClosed(
+ closeSummary,
+ channeldb.ChanStatusRemoteCloseInitiator,
+ )
+ if err != nil {
+ return fmt.Errorf("unable to mark channel closed: %w", err)
+ }
+
+ // We'll now advance our state machine until it reaches a terminal
+ // state.
+ _, _, err = c.advanceState(
+ uint32(closeInfo.SpendingHeight),
+ remoteCloseTrigger, &closeInfo.CommitSet,
+ )
+ if err != nil {
+ log.Errorf("Unable to advance state: %v", err)
+ }
+
+ return nil
+}
+
+// handleContractBreach takes a breach close event from ChainEvents, saves the
+// contract resolutions to disk, mark the channel as closed and advance the
+// state.
+func (c *ChannelArbitrator) handleContractBreach(
+ breachInfo *BreachCloseInfo) error {
+
+ closeSummary := &breachInfo.CloseSummary
+
+ log.Infof("ChannelArbitrator(%v): remote party has breached channel "+
+ "at height %v!", c.id(), closeSummary.CloseHeight)
+
+ // In the breach case, we'll only have anchor and breach resolutions.
+ contractRes := &ContractResolutions{
+ CommitHash: breachInfo.CommitHash,
+ BreachResolution: breachInfo.BreachResolution,
+ AnchorResolution: breachInfo.AnchorResolution,
+ }
+
+ // We'll transition to the ContractClosed state and log the set of
+ // resolutions such that they can be turned into resolvers later on.
+ // We'll also insert the CommitSet of the latest set of commitments.
+ err := c.log.LogContractResolutions(contractRes)
+ if err != nil {
+ return fmt.Errorf("unable to write resolutions: %w", err)
+ }
+
+ err = c.log.InsertConfirmedCommitSet(&breachInfo.CommitSet)
+ if err != nil {
+ return fmt.Errorf("unable to write commit set: %w", err)
+ }
+
+ // The channel is finally marked pending closed here as the
+ // BreachArbitrator and channel arbitrator have persisted the relevant
+ // states.
+ err = c.cfg.MarkChannelClosed(
+ closeSummary, channeldb.ChanStatusRemoteCloseInitiator,
+ )
+ if err != nil {
+ return fmt.Errorf("unable to mark channel closed: %w", err)
+ }
+
+ log.Infof("Breached channel=%v marked pending-closed",
+ breachInfo.BreachResolution.FundingOutPoint)
+
+ // We'll advance our state machine until it reaches a terminal state.
+ _, _, err = c.advanceState(
+ closeSummary.CloseHeight, breachCloseTrigger,
+ &breachInfo.CommitSet,
+ )
+ if err != nil {
+ log.Errorf("Unable to advance state: %v", err)
+ }
+
+ return nil
+}
diff --git a/contractcourt/channel_arbitrator_test.go b/contractcourt/channel_arbitrator_test.go
index 1353770d8a..9e47569213 100644
--- a/contractcourt/channel_arbitrator_test.go
+++ b/contractcourt/channel_arbitrator_test.go
@@ -13,6 +13,8 @@ import (
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
+ "github.com/davecgh/go-spew/spew"
+ "github.com/lightningnetwork/lnd/chainio"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/models"
@@ -226,6 +228,15 @@ func (c *chanArbTestCtx) CleanUp() {
}
}
+// receiveBlockbeat mocks the behavior of a blockbeat being sent by the
+// BlockbeatDispatcher, which essentially mocks the method `ProcessBlock`.
+func (c *chanArbTestCtx) receiveBlockbeat(height int) {
+ go func() {
+ beat := newBeatFromHeight(int32(height))
+ c.chanArb.BlockbeatChan <- beat
+ }()
+}
+
// AssertStateTransitions asserts that the state machine steps through the
// passed states in order.
func (c *chanArbTestCtx) AssertStateTransitions(expectedStates ...ArbitratorState) {
@@ -285,7 +296,8 @@ func (c *chanArbTestCtx) Restart(restartClosure func(*chanArbTestCtx)) (*chanArb
restartClosure(newCtx)
}
- if err := newCtx.chanArb.Start(nil); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := newCtx.chanArb.Start(nil, beat); err != nil {
return nil, err
}
@@ -512,7 +524,8 @@ func TestChannelArbitratorCooperativeClose(t *testing.T) {
chanArbCtx, err := createTestChannelArbitrator(t, log)
require.NoError(t, err, "unable to create ChannelArbitrator")
- if err := chanArbCtx.chanArb.Start(nil); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := chanArbCtx.chanArb.Start(nil, beat); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
t.Cleanup(func() {
@@ -570,7 +583,8 @@ func TestChannelArbitratorRemoteForceClose(t *testing.T) {
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
- if err := chanArb.Start(nil); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := chanArb.Start(nil, beat); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
defer chanArb.Stop()
@@ -623,7 +637,8 @@ func TestChannelArbitratorLocalForceClose(t *testing.T) {
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
- if err := chanArb.Start(nil); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := chanArb.Start(nil, beat); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
defer chanArb.Stop()
@@ -731,7 +746,8 @@ func TestChannelArbitratorBreachClose(t *testing.T) {
chanArb.cfg.PreimageDB = newMockWitnessBeacon()
chanArb.cfg.Registry = &mockRegistry{}
- if err := chanArb.Start(nil); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := chanArb.Start(nil, beat); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
t.Cleanup(func() {
@@ -858,7 +874,8 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
chanArb.cfg.PreimageDB = newMockWitnessBeacon()
chanArb.cfg.Registry = &mockRegistry{}
- if err := chanArb.Start(nil); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := chanArb.Start(nil, beat); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
defer chanArb.Stop()
@@ -961,6 +978,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
},
},
}
+ closeTxid := closeTx.TxHash()
htlcOp := wire.OutPoint{
Hash: closeTx.TxHash(),
@@ -1029,7 +1047,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
}
require.Equal(t, expectedFinalHtlcs, chanArbCtx.finalHtlcs)
- // We'll no re-create the resolver, notice that we use the existing
+ // We'll now re-create the resolver, notice that we use the existing
// arbLog so it carries over the same on-disk state.
chanArbCtxNew, err := chanArbCtx.Restart(nil)
require.NoError(t, err, "unable to create ChannelArbitrator")
@@ -1079,7 +1097,11 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
// Notify resolver that the HTLC output of the commitment has been
// spent.
- oldNotifier.SpendChan <- &chainntnfs.SpendDetail{SpendingTx: closeTx}
+ oldNotifier.SpendChan <- &chainntnfs.SpendDetail{
+ SpendingTx: closeTx,
+ SpentOutPoint: &wire.OutPoint{},
+ SpenderTxHash: &closeTxid,
+ }
// Finally, we should also receive a resolution message instructing the
// switch to cancel back the HTLC.
@@ -1106,8 +1128,12 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
default:
}
- // Notify resolver that the second level transaction is spent.
- oldNotifier.SpendChan <- &chainntnfs.SpendDetail{SpendingTx: closeTx}
+ // Notify resolver that the output of the timeout tx has been spent.
+ oldNotifier.SpendChan <- &chainntnfs.SpendDetail{
+ SpendingTx: closeTx,
+ SpentOutPoint: &wire.OutPoint{},
+ SpenderTxHash: &closeTxid,
+ }
// At this point channel should be marked as resolved.
chanArbCtxNew.AssertStateTransitions(StateFullyResolved)
@@ -1131,7 +1157,8 @@ func TestChannelArbitratorLocalForceCloseRemoteConfirmed(t *testing.T) {
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
- if err := chanArb.Start(nil); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := chanArb.Start(nil, beat); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
defer chanArb.Stop()
@@ -1238,7 +1265,8 @@ func TestChannelArbitratorLocalForceDoubleSpend(t *testing.T) {
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
- if err := chanArb.Start(nil); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := chanArb.Start(nil, beat); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
defer chanArb.Stop()
@@ -1344,7 +1372,8 @@ func TestChannelArbitratorPersistence(t *testing.T) {
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
- if err := chanArb.Start(nil); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := chanArb.Start(nil, beat); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
@@ -1462,7 +1491,8 @@ func TestChannelArbitratorForceCloseBreachedChannel(t *testing.T) {
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
- if err := chanArb.Start(nil); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := chanArb.Start(nil, beat); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
@@ -1646,7 +1676,8 @@ func TestChannelArbitratorCommitFailure(t *testing.T) {
}
chanArb := chanArbCtx.chanArb
- if err := chanArb.Start(nil); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := chanArb.Start(nil, beat); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
@@ -1730,7 +1761,8 @@ func TestChannelArbitratorEmptyResolutions(t *testing.T) {
chanArb.cfg.ClosingHeight = 100
chanArb.cfg.CloseType = channeldb.RemoteForceClose
- if err := chanArb.Start(nil); err != nil {
+ beat := newBeatFromHeight(100)
+ if err := chanArb.Start(nil, beat); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
@@ -1760,7 +1792,8 @@ func TestChannelArbitratorAlreadyForceClosed(t *testing.T) {
chanArbCtx, err := createTestChannelArbitrator(t, log)
require.NoError(t, err, "unable to create ChannelArbitrator")
chanArb := chanArbCtx.chanArb
- if err := chanArb.Start(nil); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := chanArb.Start(nil, beat); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
defer chanArb.Stop()
@@ -1858,9 +1891,10 @@ func TestChannelArbitratorDanglingCommitForceClose(t *testing.T) {
t.Fatalf("unable to create ChannelArbitrator: %v", err)
}
chanArb := chanArbCtx.chanArb
- if err := chanArb.Start(nil); err != nil {
- t.Fatalf("unable to start ChannelArbitrator: %v", err)
- }
+ beat := newBeatFromHeight(0)
+ err = chanArb.Start(nil, beat)
+ require.NoError(t, err)
+
defer chanArb.Stop()
// Now that our channel arb has started, we'll set up
@@ -1904,7 +1938,8 @@ func TestChannelArbitratorDanglingCommitForceClose(t *testing.T) {
// now mine a block (height 5), which is 5 blocks away
// (our grace delta) from the expiry of that HTLC.
case testCase.htlcExpired:
- chanArbCtx.chanArb.blocks <- 5
+ beat := newBeatFromHeight(5)
+ chanArbCtx.chanArb.BlockbeatChan <- beat
// Otherwise, we'll just trigger a regular force close
// request.
@@ -2012,8 +2047,7 @@ func TestChannelArbitratorDanglingCommitForceClose(t *testing.T) {
// so instead, we'll mine another block which'll cause
// it to re-examine its state and realize there're no
// more HTLCs.
- chanArbCtx.chanArb.blocks <- 6
- chanArbCtx.AssertStateTransitions(StateFullyResolved)
+ chanArbCtx.receiveBlockbeat(6)
})
}
}
@@ -2050,7 +2084,8 @@ func TestChannelArbitratorPendingExpiredHTLC(t *testing.T) {
return false
}
- if err := chanArb.Start(nil); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := chanArb.Start(nil, beat); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
t.Cleanup(func() {
@@ -2084,13 +2119,15 @@ func TestChannelArbitratorPendingExpiredHTLC(t *testing.T) {
// We will advance the uptime to 10 seconds which should be still within
// the grace period and should not trigger going to chain.
testClock.SetTime(startTime.Add(time.Second * 10))
- chanArbCtx.chanArb.blocks <- 5
+ beat = newBeatFromHeight(5)
+ chanArbCtx.chanArb.BlockbeatChan <- beat
chanArbCtx.AssertState(StateDefault)
// We will advance the uptime to 16 seconds which should trigger going
// to chain.
testClock.SetTime(startTime.Add(time.Second * 16))
- chanArbCtx.chanArb.blocks <- 6
+ beat = newBeatFromHeight(6)
+ chanArbCtx.chanArb.BlockbeatChan <- beat
chanArbCtx.AssertStateTransitions(
StateBroadcastCommit,
StateCommitmentBroadcasted,
@@ -2203,8 +2240,8 @@ func TestRemoteCloseInitiator(t *testing.T) {
"ChannelArbitrator: %v", err)
}
chanArb := chanArbCtx.chanArb
-
- if err := chanArb.Start(nil); err != nil {
+ beat := newBeatFromHeight(0)
+ if err := chanArb.Start(nil, beat); err != nil {
t.Fatalf("unable to start "+
"ChannelArbitrator: %v", err)
}
@@ -2458,7 +2495,7 @@ func TestSweepAnchors(t *testing.T) {
// Set current block height.
heightHint := uint32(1000)
- chanArbCtx.chanArb.blocks <- int32(heightHint)
+ chanArbCtx.receiveBlockbeat(int(heightHint))
htlcIndexBase := uint64(99)
deadlineDelta := uint32(10)
@@ -2621,7 +2658,7 @@ func TestSweepLocalAnchor(t *testing.T) {
// Set current block height.
heightHint := uint32(1000)
- chanArbCtx.chanArb.blocks <- int32(heightHint)
+ chanArbCtx.receiveBlockbeat(int(heightHint))
htlcIndex := uint64(99)
deadlineDelta := uint32(10)
@@ -2755,7 +2792,9 @@ func TestChannelArbitratorAnchors(t *testing.T) {
},
}
- if err := chanArb.Start(nil); err != nil {
+ heightHint := uint32(1000)
+ beat := newBeatFromHeight(int32(heightHint))
+ if err := chanArb.Start(nil, beat); err != nil {
t.Fatalf("unable to start ChannelArbitrator: %v", err)
}
t.Cleanup(func() {
@@ -2767,27 +2806,28 @@ func TestChannelArbitratorAnchors(t *testing.T) {
}
chanArb.UpdateContractSignals(signals)
- // Set current block height.
- heightHint := uint32(1000)
- chanArbCtx.chanArb.blocks <- int32(heightHint)
-
htlcAmt := lnwire.MilliSatoshi(1_000_000)
// Create testing HTLCs.
- deadlineDelta := uint32(10)
- deadlinePreimageDelta := deadlineDelta + 2
+ spendingHeight := uint32(beat.Height())
+ deadlineDelta := uint32(100)
+
+ deadlinePreimageDelta := deadlineDelta
htlcWithPreimage := channeldb.HTLC{
- HtlcIndex: 99,
- RefundTimeout: heightHint + deadlinePreimageDelta,
+ HtlcIndex: 99,
+ // RefundTimeout is 101.
+ RefundTimeout: spendingHeight + deadlinePreimageDelta,
RHash: rHash,
Incoming: true,
Amt: htlcAmt,
}
+ expectedDeadline := deadlineDelta/2 + spendingHeight
- deadlineHTLCdelta := deadlineDelta + 3
+ deadlineHTLCdelta := deadlineDelta + 40
htlc := channeldb.HTLC{
- HtlcIndex: 100,
- RefundTimeout: heightHint + deadlineHTLCdelta,
+ HtlcIndex: 100,
+ // RefundTimeout is 141.
+ RefundTimeout: spendingHeight + deadlineHTLCdelta,
Amt: htlcAmt,
}
@@ -2871,7 +2911,9 @@ func TestChannelArbitratorAnchors(t *testing.T) {
}
chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
- SpendDetail: &chainntnfs.SpendDetail{},
+ SpendDetail: &chainntnfs.SpendDetail{
+ SpendingHeight: int32(spendingHeight),
+ },
LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
CloseTx: closeTx,
HtlcResolutions: &lnwallet.HtlcResolutions{},
@@ -2933,12 +2975,14 @@ func TestChannelArbitratorAnchors(t *testing.T) {
// to htlcWithPreimage's CLTV.
require.Equal(t, 2, len(chanArbCtx.sweeper.deadlines))
require.EqualValues(t,
- heightHint+deadlinePreimageDelta/2,
- chanArbCtx.sweeper.deadlines[0],
+ expectedDeadline,
+ chanArbCtx.sweeper.deadlines[0], "want %d, got %d",
+ expectedDeadline, chanArbCtx.sweeper.deadlines[0],
)
require.EqualValues(t,
- heightHint+deadlinePreimageDelta/2,
- chanArbCtx.sweeper.deadlines[1],
+ expectedDeadline,
+ chanArbCtx.sweeper.deadlines[1], "want %d, got %d",
+ expectedDeadline, chanArbCtx.sweeper.deadlines[1],
)
}
@@ -3037,7 +3081,8 @@ func TestChannelArbitratorStartForceCloseFail(t *testing.T) {
return test.broadcastErr
}
- err = chanArb.Start(nil)
+ beat := newBeatFromHeight(0)
+ err = chanArb.Start(nil, beat)
if !test.expectedStartup {
require.ErrorIs(t, err, test.broadcastErr)
@@ -3085,7 +3130,8 @@ func assertResolverReport(t *testing.T, reports chan *channeldb.ResolverReport,
select {
case report := <-reports:
if !reflect.DeepEqual(report, expected) {
- t.Fatalf("expected: %v, got: %v", expected, report)
+ t.Fatalf("expected: %v, got: %v", spew.Sdump(expected),
+ spew.Sdump(report))
}
case <-time.After(defaultTimeout):
@@ -3120,3 +3166,11 @@ func (m *mockChannel) ForceCloseChan() (*lnwallet.LocalForceCloseSummary, error)
}
return summary, nil
}
+
+func newBeatFromHeight(height int32) *chainio.Beat {
+ epoch := chainntnfs.BlockEpoch{
+ Height: height,
+ }
+
+ return chainio.NewBeat(epoch)
+}
diff --git a/contractcourt/commit_sweep_resolver.go b/contractcourt/commit_sweep_resolver.go
index 4b47a34294..77c04b77b7 100644
--- a/contractcourt/commit_sweep_resolver.go
+++ b/contractcourt/commit_sweep_resolver.go
@@ -39,9 +39,6 @@ type commitSweepResolver struct {
// this HTLC on-chain.
commitResolution lnwallet.CommitOutputResolution
- // resolved reflects if the contract has been fully resolved or not.
- resolved bool
-
// broadcastHeight is the height that the original contract was
// broadcast to the main-chain at. We'll use this value to bound any
// historical queries to the chain for spends/confirmations.
@@ -88,7 +85,7 @@ func newCommitSweepResolver(res lnwallet.CommitOutputResolution,
chanPoint: chanPoint,
}
- r.initLogger(r)
+ r.initLogger(fmt.Sprintf("%T(%v)", r, r.commitResolution.SelfOutPoint))
r.initReport()
return r
@@ -101,36 +98,6 @@ func (c *commitSweepResolver) ResolverKey() []byte {
return key[:]
}
-// waitForHeight registers for block notifications and waits for the provided
-// block height to be reached.
-func waitForHeight(waitHeight uint32, notifier chainntnfs.ChainNotifier,
- quit <-chan struct{}) error {
-
- // Register for block epochs. After registration, the current height
- // will be sent on the channel immediately.
- blockEpochs, err := notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- return err
- }
- defer blockEpochs.Cancel()
-
- for {
- select {
- case newBlock, ok := <-blockEpochs.Epochs:
- if !ok {
- return errResolverShuttingDown
- }
- height := newBlock.Height
- if height >= int32(waitHeight) {
- return nil
- }
-
- case <-quit:
- return errResolverShuttingDown
- }
- }
-}
-
// waitForSpend waits for the given outpoint to be spent, and returns the
// details of the spending tx.
func waitForSpend(op *wire.OutPoint, pkScript []byte, heightHint uint32,
@@ -195,203 +162,17 @@ func (c *commitSweepResolver) getCommitTxConfHeight() (uint32, error) {
// returned.
//
// NOTE: This function MUST be run as a goroutine.
+
+// TODO(yy): fix the funlen in the next PR.
//
//nolint:funlen
-func (c *commitSweepResolver) Resolve(_ bool) (ContractResolver, error) {
+func (c *commitSweepResolver) Resolve() (ContractResolver, error) {
// If we're already resolved, then we can exit early.
- if c.resolved {
+ if c.IsResolved() {
+ c.log.Errorf("already resolved")
return nil, nil
}
- confHeight, err := c.getCommitTxConfHeight()
- if err != nil {
- return nil, err
- }
-
- // Wait up until the CSV expires, unless we also have a CLTV that
- // expires after.
- unlockHeight := confHeight + c.commitResolution.MaturityDelay
- if c.hasCLTV() {
- unlockHeight = uint32(math.Max(
- float64(unlockHeight), float64(c.leaseExpiry),
- ))
- }
-
- c.log.Debugf("commit conf_height=%v, unlock_height=%v",
- confHeight, unlockHeight)
-
- // Update report now that we learned the confirmation height.
- c.reportLock.Lock()
- c.currentReport.MaturityHeight = unlockHeight
- c.reportLock.Unlock()
-
- // If there is a csv/cltv lock, we'll wait for that.
- if c.commitResolution.MaturityDelay > 0 || c.hasCLTV() {
- // Determine what height we should wait until for the locks to
- // expire.
- var waitHeight uint32
- switch {
- // If we have both a csv and cltv lock, we'll need to look at
- // both and see which expires later.
- case c.commitResolution.MaturityDelay > 0 && c.hasCLTV():
- c.log.Debugf("waiting for CSV and CLTV lock to expire "+
- "at height %v", unlockHeight)
- // If the CSV expires after the CLTV, or there is no
- // CLTV, then we can broadcast a sweep a block before.
- // Otherwise, we need to broadcast at our expected
- // unlock height.
- waitHeight = uint32(math.Max(
- float64(unlockHeight-1), float64(c.leaseExpiry),
- ))
-
- // If we only have a csv lock, wait for the height before the
- // lock expires as the spend path should be unlocked by then.
- case c.commitResolution.MaturityDelay > 0:
- c.log.Debugf("waiting for CSV lock to expire at "+
- "height %v", unlockHeight)
- waitHeight = unlockHeight - 1
- }
-
- err := waitForHeight(waitHeight, c.Notifier, c.quit)
- if err != nil {
- return nil, err
- }
- }
-
- var (
- isLocalCommitTx bool
-
- signDesc = c.commitResolution.SelfOutputSignDesc
- )
-
- switch {
- // For taproot channels, we'll know if this is the local commit based
- // on the timelock value. For remote commitment transactions, the
- // witness script has a timelock of 1.
- case c.chanType.IsTaproot():
- delayKey := c.localChanCfg.DelayBasePoint.PubKey
- nonDelayKey := c.localChanCfg.PaymentBasePoint.PubKey
-
- signKey := c.commitResolution.SelfOutputSignDesc.KeyDesc.PubKey
-
- // If the key in the script is neither of these, we shouldn't
- // proceed. This should be impossible.
- if !signKey.IsEqual(delayKey) && !signKey.IsEqual(nonDelayKey) {
- return nil, fmt.Errorf("unknown sign key %v", signKey)
- }
-
- // The commitment transaction is ours iff the signing key is
- // the delay key.
- isLocalCommitTx = signKey.IsEqual(delayKey)
-
- // The output is on our local commitment if the script starts with
- // OP_IF for the revocation clause. On the remote commitment it will
- // either be a regular P2WKH or a simple sig spend with a CSV delay.
- default:
- isLocalCommitTx = signDesc.WitnessScript[0] == txscript.OP_IF
- }
- isDelayedOutput := c.commitResolution.MaturityDelay != 0
-
- c.log.Debugf("isDelayedOutput=%v, isLocalCommitTx=%v", isDelayedOutput,
- isLocalCommitTx)
-
- // There're three types of commitments, those that have tweaks for the
- // remote key (us in this case), those that don't, and a third where
- // there is no tweak and the output is delayed. On the local commitment
- // our output will always be delayed. We'll rely on the presence of the
- // commitment tweak to discern which type of commitment this is.
- var witnessType input.WitnessType
- switch {
- // The local delayed output for a taproot channel.
- case isLocalCommitTx && c.chanType.IsTaproot():
- witnessType = input.TaprootLocalCommitSpend
-
- // The CSV 1 delayed output for a taproot channel.
- case !isLocalCommitTx && c.chanType.IsTaproot():
- witnessType = input.TaprootRemoteCommitSpend
-
- // Delayed output to us on our local commitment for a channel lease in
- // which we are the initiator.
- case isLocalCommitTx && c.hasCLTV():
- witnessType = input.LeaseCommitmentTimeLock
-
- // Delayed output to us on our local commitment.
- case isLocalCommitTx:
- witnessType = input.CommitmentTimeLock
-
- // A confirmed output to us on the remote commitment for a channel lease
- // in which we are the initiator.
- case isDelayedOutput && c.hasCLTV():
- witnessType = input.LeaseCommitmentToRemoteConfirmed
-
- // A confirmed output to us on the remote commitment.
- case isDelayedOutput:
- witnessType = input.CommitmentToRemoteConfirmed
-
- // A non-delayed output on the remote commitment where the key is
- // tweakless.
- case c.commitResolution.SelfOutputSignDesc.SingleTweak == nil:
- witnessType = input.CommitSpendNoDelayTweakless
-
- // A non-delayed output on the remote commitment where the key is
- // tweaked.
- default:
- witnessType = input.CommitmentNoDelay
- }
-
- c.log.Infof("Sweeping with witness type: %v", witnessType)
-
- // We'll craft an input with all the information required for the
- // sweeper to create a fully valid sweeping transaction to recover
- // these coins.
- var inp *input.BaseInput
- if c.hasCLTV() {
- inp = input.NewCsvInputWithCltv(
- &c.commitResolution.SelfOutPoint, witnessType,
- &c.commitResolution.SelfOutputSignDesc,
- c.broadcastHeight, c.commitResolution.MaturityDelay,
- c.leaseExpiry,
- input.WithResolutionBlob(
- c.commitResolution.ResolutionBlob,
- ),
- )
- } else {
- inp = input.NewCsvInput(
- &c.commitResolution.SelfOutPoint, witnessType,
- &c.commitResolution.SelfOutputSignDesc,
- c.broadcastHeight, c.commitResolution.MaturityDelay,
- input.WithResolutionBlob(
- c.commitResolution.ResolutionBlob,
- ),
- )
- }
-
- // TODO(roasbeef): instead of ading ctrl block to the sign desc, make
- // new input type, have sweeper set it?
-
- // Calculate the budget for the sweeping this input.
- budget := calculateBudget(
- btcutil.Amount(inp.SignDesc().Output.Value),
- c.Budget.ToLocalRatio, c.Budget.ToLocal,
- )
- c.log.Infof("Sweeping commit output using budget=%v", budget)
-
- // With our input constructed, we'll now offer it to the sweeper.
- resultChan, err := c.Sweeper.SweepInput(
- inp, sweep.Params{
- Budget: budget,
-
- // Specify a nil deadline here as there's no time
- // pressure.
- DeadlineHeight: fn.None[int32](),
- },
- )
- if err != nil {
- c.log.Errorf("unable to sweep input: %v", err)
-
- return nil, err
- }
-
var sweepTxID chainhash.Hash
// Sweeper is going to join this input with other inputs if possible
@@ -400,7 +181,7 @@ func (c *commitSweepResolver) Resolve(_ bool) (ContractResolver, error) {
// happen.
outcome := channeldb.ResolverOutcomeClaimed
select {
- case sweepResult := <-resultChan:
+ case sweepResult := <-c.sweepResultChan:
switch sweepResult.Err {
// If the remote party was able to sweep this output it's
// likely what we sent was actually a revoked commitment.
@@ -440,7 +221,7 @@ func (c *commitSweepResolver) Resolve(_ bool) (ContractResolver, error) {
report := c.currentReport.resolverReport(
&sweepTxID, channeldb.ResolverTypeCommit, outcome,
)
- c.resolved = true
+ c.resolved.Store(true)
// Checkpoint the resolver with a closure that will write the outcome
// of the resolver and its sweep transaction to disk.
@@ -452,17 +233,11 @@ func (c *commitSweepResolver) Resolve(_ bool) (ContractResolver, error) {
//
// NOTE: Part of the ContractResolver interface.
func (c *commitSweepResolver) Stop() {
+ c.log.Debugf("stopping...")
+ defer c.log.Debugf("stopped")
close(c.quit)
}
-// IsResolved returns true if the stored state in the resolve is fully
-// resolved. In this case the target output can be forgotten.
-//
-// NOTE: Part of the ContractResolver interface.
-func (c *commitSweepResolver) IsResolved() bool {
- return c.resolved
-}
-
// SupplementState allows the user of a ContractResolver to supplement it with
// state required for the proper resolution of a contract.
//
@@ -491,7 +266,7 @@ func (c *commitSweepResolver) Encode(w io.Writer) error {
return err
}
- if err := binary.Write(w, endian, c.resolved); err != nil {
+ if err := binary.Write(w, endian, c.IsResolved()); err != nil {
return err
}
if err := binary.Write(w, endian, c.broadcastHeight); err != nil {
@@ -526,9 +301,12 @@ func newCommitSweepResolverFromReader(r io.Reader, resCfg ResolverConfig) (
return nil, err
}
- if err := binary.Read(r, endian, &c.resolved); err != nil {
+ var resolved bool
+ if err := binary.Read(r, endian, &resolved); err != nil {
return nil, err
}
+ c.resolved.Store(resolved)
+
if err := binary.Read(r, endian, &c.broadcastHeight); err != nil {
return nil, err
}
@@ -545,7 +323,7 @@ func newCommitSweepResolverFromReader(r io.Reader, resCfg ResolverConfig) (
// removed this, but keep in mind that this data may still be present in
// the database.
- c.initLogger(c)
+ c.initLogger(fmt.Sprintf("%T(%v)", c, c.commitResolution.SelfOutPoint))
c.initReport()
return c, nil
@@ -585,3 +363,181 @@ func (c *commitSweepResolver) initReport() {
// A compile time assertion to ensure commitSweepResolver meets the
// ContractResolver interface.
var _ reportingContractResolver = (*commitSweepResolver)(nil)
+
+// Launch constructs a commit input and offers it to the sweeper.
+func (c *commitSweepResolver) Launch() error {
+ if c.launched.Load() {
+ c.log.Tracef("already launched")
+ return nil
+ }
+
+ c.log.Debugf("launching resolver...")
+ c.launched.Store(true)
+
+ // If we're already resolved, then we can exit early.
+ if c.IsResolved() {
+ c.log.Errorf("already resolved")
+ return nil
+ }
+
+ confHeight, err := c.getCommitTxConfHeight()
+ if err != nil {
+ return err
+ }
+
+ // Wait up until the CSV expires, unless we also have a CLTV that
+ // expires after.
+ unlockHeight := confHeight + c.commitResolution.MaturityDelay
+ if c.hasCLTV() {
+ unlockHeight = uint32(math.Max(
+ float64(unlockHeight), float64(c.leaseExpiry),
+ ))
+ }
+
+ // Update report now that we learned the confirmation height.
+ c.reportLock.Lock()
+ c.currentReport.MaturityHeight = unlockHeight
+ c.reportLock.Unlock()
+
+ // Derive the witness type for this input.
+ witnessType, err := c.decideWitnessType()
+ if err != nil {
+ return err
+ }
+
+ // We'll craft an input with all the information required for the
+ // sweeper to create a fully valid sweeping transaction to recover
+ // these coins.
+ var inp *input.BaseInput
+ if c.hasCLTV() {
+ inp = input.NewCsvInputWithCltv(
+ &c.commitResolution.SelfOutPoint, witnessType,
+ &c.commitResolution.SelfOutputSignDesc,
+ c.broadcastHeight, c.commitResolution.MaturityDelay,
+ c.leaseExpiry,
+ )
+ } else {
+ inp = input.NewCsvInput(
+ &c.commitResolution.SelfOutPoint, witnessType,
+ &c.commitResolution.SelfOutputSignDesc,
+ c.broadcastHeight, c.commitResolution.MaturityDelay,
+ )
+ }
+
+ // TODO(roasbeef): instead of ading ctrl block to the sign desc, make
+ // new input type, have sweeper set it?
+
+ // Calculate the budget for the sweeping this input.
+ budget := calculateBudget(
+ btcutil.Amount(inp.SignDesc().Output.Value),
+ c.Budget.ToLocalRatio, c.Budget.ToLocal,
+ )
+ c.log.Infof("sweeping commit output %v using budget=%v", witnessType,
+ budget)
+
+ // With our input constructed, we'll now offer it to the sweeper.
+ resultChan, err := c.Sweeper.SweepInput(
+ inp, sweep.Params{
+ Budget: budget,
+
+ // Specify a nil deadline here as there's no time
+ // pressure.
+ DeadlineHeight: fn.None[int32](),
+ },
+ )
+ if err != nil {
+ c.log.Errorf("unable to sweep input: %v", err)
+
+ return err
+ }
+
+ c.sweepResultChan = resultChan
+
+ return nil
+}
+
+// decideWitnessType returns the witness type for the input.
+func (c *commitSweepResolver) decideWitnessType() (input.WitnessType, error) {
+ var (
+ isLocalCommitTx bool
+ signDesc = c.commitResolution.SelfOutputSignDesc
+ )
+
+ switch {
+ // For taproot channels, we'll know if this is the local commit based
+ // on the timelock value. For remote commitment transactions, the
+ // witness script has a timelock of 1.
+ case c.chanType.IsTaproot():
+ delayKey := c.localChanCfg.DelayBasePoint.PubKey
+ nonDelayKey := c.localChanCfg.PaymentBasePoint.PubKey
+
+ signKey := c.commitResolution.SelfOutputSignDesc.KeyDesc.PubKey
+
+ // If the key in the script is neither of these, we shouldn't
+ // proceed. This should be impossible.
+ if !signKey.IsEqual(delayKey) && !signKey.IsEqual(nonDelayKey) {
+ return nil, fmt.Errorf("unknown sign key %v", signKey)
+ }
+
+ // The commitment transaction is ours iff the signing key is
+ // the delay key.
+ isLocalCommitTx = signKey.IsEqual(delayKey)
+
+ // The output is on our local commitment if the script starts with
+ // OP_IF for the revocation clause. On the remote commitment it will
+ // either be a regular P2WKH or a simple sig spend with a CSV delay.
+ default:
+ isLocalCommitTx = signDesc.WitnessScript[0] == txscript.OP_IF
+ }
+
+ isDelayedOutput := c.commitResolution.MaturityDelay != 0
+
+ c.log.Debugf("isDelayedOutput=%v, isLocalCommitTx=%v", isDelayedOutput,
+ isLocalCommitTx)
+
+ // There're three types of commitments, those that have tweaks for the
+ // remote key (us in this case), those that don't, and a third where
+ // there is no tweak and the output is delayed. On the local commitment
+ // our output will always be delayed. We'll rely on the presence of the
+ // commitment tweak to discern which type of commitment this is.
+ var witnessType input.WitnessType
+ switch {
+ // The local delayed output for a taproot channel.
+ case isLocalCommitTx && c.chanType.IsTaproot():
+ witnessType = input.TaprootLocalCommitSpend
+
+ // The CSV 1 delayed output for a taproot channel.
+ case !isLocalCommitTx && c.chanType.IsTaproot():
+ witnessType = input.TaprootRemoteCommitSpend
+
+ // Delayed output to us on our local commitment for a channel lease in
+ // which we are the initiator.
+ case isLocalCommitTx && c.hasCLTV():
+ witnessType = input.LeaseCommitmentTimeLock
+
+ // Delayed output to us on our local commitment.
+ case isLocalCommitTx:
+ witnessType = input.CommitmentTimeLock
+
+ // A confirmed output to us on the remote commitment for a channel lease
+ // in which we are the initiator.
+ case isDelayedOutput && c.hasCLTV():
+ witnessType = input.LeaseCommitmentToRemoteConfirmed
+
+ // A confirmed output to us on the remote commitment.
+ case isDelayedOutput:
+ witnessType = input.CommitmentToRemoteConfirmed
+
+ // A non-delayed output on the remote commitment where the key is
+ // tweakless.
+ case c.commitResolution.SelfOutputSignDesc.SingleTweak == nil:
+ witnessType = input.CommitSpendNoDelayTweakless
+
+ // A non-delayed output on the remote commitment where the key is
+ // tweaked.
+ default:
+ witnessType = input.CommitmentNoDelay
+ }
+
+ return witnessType, nil
+}
diff --git a/contractcourt/commit_sweep_resolver_test.go b/contractcourt/commit_sweep_resolver_test.go
index f2b43b0f80..03b424c34c 100644
--- a/contractcourt/commit_sweep_resolver_test.go
+++ b/contractcourt/commit_sweep_resolver_test.go
@@ -15,6 +15,7 @@ import (
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/lightningnetwork/lnd/sweep"
+ "github.com/stretchr/testify/require"
)
type commitSweepResolverTestContext struct {
@@ -82,7 +83,10 @@ func (i *commitSweepResolverTestContext) resolve() {
// Start resolver.
i.resolverResultChan = make(chan resolveResult, 1)
go func() {
- nextResolver, err := i.resolver.Resolve(false)
+ err := i.resolver.Launch()
+ require.NoError(i.t, err)
+
+ nextResolver, err := i.resolver.Resolve()
i.resolverResultChan <- resolveResult{
nextResolver: nextResolver,
err: err,
@@ -90,12 +94,6 @@ func (i *commitSweepResolverTestContext) resolve() {
}()
}
-func (i *commitSweepResolverTestContext) notifyEpoch(height int32) {
- i.notifier.EpochChan <- &chainntnfs.BlockEpoch{
- Height: height,
- }
-}
-
func (i *commitSweepResolverTestContext) waitForResult() {
i.t.Helper()
@@ -292,22 +290,10 @@ func testCommitSweepResolverDelay(t *testing.T, sweepErr error) {
t.Fatal("report maturity height incorrect")
}
- // Notify initial block height. The csv lock is still in effect, so we
- // don't expect any sweep to happen yet.
- ctx.notifyEpoch(testInitialBlockHeight)
-
- select {
- case <-ctx.sweeper.sweptInputs:
- t.Fatal("no sweep expected")
- case <-time.After(sweepProcessInterval):
- }
-
- // A new block arrives. The commit tx confirmed at height -1 and the csv
- // is 3, so a spend will be valid in the first block after height +1.
- ctx.notifyEpoch(testInitialBlockHeight + 1)
-
- <-ctx.sweeper.sweptInputs
-
+ // Notify initial block height. Although the csv lock is still in
+ // effect, we expect the input being sent to the sweeper before the csv
+ // lock expires.
+ //
// Set the resolution report outcome based on whether our sweep
// succeeded.
outcome := channeldb.ResolverOutcomeClaimed
diff --git a/contractcourt/contract_resolver.go b/contractcourt/contract_resolver.go
index 691822610a..737c36d007 100644
--- a/contractcourt/contract_resolver.go
+++ b/contractcourt/contract_resolver.go
@@ -5,12 +5,14 @@ import (
"errors"
"fmt"
"io"
+ "sync/atomic"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btclog/v2"
"github.com/lightningnetwork/lnd/build"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/fn"
+ "github.com/lightningnetwork/lnd/sweep"
)
var (
@@ -36,6 +38,17 @@ type ContractResolver interface {
// resides within.
ResolverKey() []byte
+ // Launch starts the resolver by constructing an input and offering it
+ // to the sweeper. Once offered, it's expected to monitor the sweeping
+ // result in a goroutine invoked by calling Resolve.
+ //
+ // NOTE: We can call `Resolve` inside a goroutine at the end of this
+ // method to avoid calling it in the ChannelArbitrator. However, there
+ // are some DB-related operations such as SwapContract/ResolveContract
+ // which need to be done inside the resolvers instead, which needs a
+ // deeper refactoring.
+ Launch() error
+
// Resolve instructs the contract resolver to resolve the output
// on-chain. Once the output has been *fully* resolved, the function
// should return immediately with a nil ContractResolver value for the
@@ -43,7 +56,7 @@ type ContractResolver interface {
// resolution, then another resolve is returned.
//
// NOTE: This function MUST be run as a goroutine.
- Resolve(immediate bool) (ContractResolver, error)
+ Resolve() (ContractResolver, error)
// SupplementState allows the user of a ContractResolver to supplement
// it with state required for the proper resolution of a contract.
@@ -110,6 +123,18 @@ type contractResolverKit struct {
log btclog.Logger
quit chan struct{}
+
+ // sweepResultChan is the result chan returned from calling
+ // `SweepInput`. It should be mounted to the specific resolver once the
+ // input has been offered to the sweeper.
+ sweepResultChan chan sweep.Result
+
+ // launched specifies whether the resolver has been launched. Calling
+ // `Launch` will be a no-op if this is true.
+ launched atomic.Bool
+
+ // resolved reflects if the contract has been fully resolved or not.
+ resolved atomic.Bool
}
// newContractResolverKit instantiates the mix-in struct.
@@ -121,11 +146,27 @@ func newContractResolverKit(cfg ResolverConfig) *contractResolverKit {
}
// initLogger initializes the resolver-specific logger.
-func (r *contractResolverKit) initLogger(resolver ContractResolver) {
- logPrefix := fmt.Sprintf("%T(%v):", resolver, r.ChanPoint)
+func (r *contractResolverKit) initLogger(prefix string) {
+ logPrefix := fmt.Sprintf("ChannelArbitrator(%v): %s:", r.ShortChanID,
+ prefix)
+
+ // If the ShortChanID is empty, we use the ChannelPoint instead.
+ if r.ShortChanID.IsDefault() {
+ logPrefix = fmt.Sprintf("ChannelArbitrator(%v): %s:",
+ r.ChanPoint, prefix)
+ }
+
r.log = build.NewPrefixLog(logPrefix, log)
}
+// IsResolved returns true if the stored state in the resolve is fully
+// resolved. In this case the target output can be forgotten.
+//
+// NOTE: Part of the ContractResolver interface.
+func (r *contractResolverKit) IsResolved() bool {
+ return r.resolved.Load()
+}
+
var (
// errResolverShuttingDown is returned when the resolver stops
// progressing because it received the quit signal.
diff --git a/contractcourt/htlc_incoming_contest_resolver.go b/contractcourt/htlc_incoming_contest_resolver.go
index 6bda4e398b..0c9013f811 100644
--- a/contractcourt/htlc_incoming_contest_resolver.go
+++ b/contractcourt/htlc_incoming_contest_resolver.go
@@ -78,6 +78,48 @@ func (h *htlcIncomingContestResolver) processFinalHtlcFail() error {
return nil
}
+// Launch will call the inner resolver's launch method if the preimage can be
+// found, otherwise it's a no-op.
+func (h *htlcIncomingContestResolver) Launch() error {
+ // NOTE: we don't mark this resolver as launched as the inner resolver
+ // will set it when it's launched.
+ if h.launched.Load() {
+ h.log.Tracef("already launched")
+ return nil
+ }
+
+ // If the HTLC has custom records, then for now we'll pause resolution.
+ //
+ // TODO(roasbeef): Implement resolving HTLCs with custom records
+ // (follow-up PR).
+ if len(h.htlc.CustomRecords) != 0 {
+ select { //nolint:gosimple
+ case <-h.quit:
+ return errResolverShuttingDown
+ }
+ }
+
+ h.log.Debugf("launching contest resolver...")
+
+ // Query the preimage and apply it if we already know it.
+ applied, err := h.findAndapplyPreimage()
+ if err != nil {
+ return err
+ }
+
+ // No preimage found, leave it to be handled by the resolver.
+ if !applied {
+ return nil
+ }
+
+ h.log.Debugf("found preimage for htlc=%x, launching success resolver",
+ h.htlc.RHash)
+
+ // Once we've applied the preimage, we'll launch the inner resolver to
+ // attempt to claim the HTLC.
+ return h.htlcSuccessResolver.Launch()
+}
+
// Resolve attempts to resolve this contract. As we don't yet know of the
// preimage for the contract, we'll wait for one of two things to happen:
//
@@ -90,12 +132,11 @@ func (h *htlcIncomingContestResolver) processFinalHtlcFail() error {
// as we have no remaining actions left at our disposal.
//
// NOTE: Part of the ContractResolver interface.
-func (h *htlcIncomingContestResolver) Resolve(
- _ bool) (ContractResolver, error) {
-
+func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) {
// If we're already full resolved, then we don't have anything further
// to do.
- if h.resolved {
+ if h.IsResolved() {
+ h.log.Errorf("already resolved")
return nil, nil
}
@@ -103,15 +144,14 @@ func (h *htlcIncomingContestResolver) Resolve(
// now.
payload, nextHopOnionBlob, err := h.decodePayload()
if err != nil {
- log.Debugf("ChannelArbitrator(%v): cannot decode payload of "+
- "htlc %v", h.ChanPoint, h.HtlcPoint())
+ h.log.Debugf("cannot decode payload of htlc %v", h.HtlcPoint())
// If we've locked in an htlc with an invalid payload on our
// commitment tx, we don't need to resolve it. The other party
// will time it out and get their funds back. This situation
// can present itself when we crash before processRemoteAdds in
// the link has ran.
- h.resolved = true
+ h.resolved.Store(true)
if err := h.processFinalHtlcFail(); err != nil {
return nil, err
@@ -164,7 +204,7 @@ func (h *htlcIncomingContestResolver) Resolve(
log.Infof("%T(%v): HTLC has timed out (expiry=%v, height=%v), "+
"abandoning", h, h.htlcResolution.ClaimOutpoint,
h.htlcExpiry, currentHeight)
- h.resolved = true
+ h.resolved.Store(true)
if err := h.processFinalHtlcFail(); err != nil {
return nil, err
@@ -179,65 +219,6 @@ func (h *htlcIncomingContestResolver) Resolve(
return nil, h.Checkpoint(h, report)
}
- // applyPreimage is a helper function that will populate our internal
- // resolver with the preimage we learn of. This should be called once
- // the preimage is revealed so the inner resolver can properly complete
- // its duties. The error return value indicates whether the preimage
- // was properly applied.
- applyPreimage := func(preimage lntypes.Preimage) error {
- // Sanity check to see if this preimage matches our htlc. At
- // this point it should never happen that it does not match.
- if !preimage.Matches(h.htlc.RHash) {
- return errors.New("preimage does not match hash")
- }
-
- // Update htlcResolution with the matching preimage.
- h.htlcResolution.Preimage = preimage
-
- log.Infof("%T(%v): applied preimage=%v", h,
- h.htlcResolution.ClaimOutpoint, preimage)
-
- isSecondLevel := h.htlcResolution.SignedSuccessTx != nil
-
- // If we didn't have to go to the second level to claim (this
- // is the remote commitment transaction), then we don't need to
- // modify our canned witness.
- if !isSecondLevel {
- return nil
- }
-
- isTaproot := txscript.IsPayToTaproot(
- h.htlcResolution.SignedSuccessTx.TxOut[0].PkScript,
- )
-
- // If this is our commitment transaction, then we'll need to
- // populate the witness for the second-level HTLC transaction.
- switch {
- // For taproot channels, the witness for sweeping with success
- // looks like:
- // -
- //
- //
- // So we'll insert it at the 3rd index of the witness.
- case isTaproot:
- //nolint:lll
- h.htlcResolution.SignedSuccessTx.TxIn[0].Witness[2] = preimage[:]
-
- // Within the witness for the success transaction, the
- // preimage is the 4th element as it looks like:
- //
- // * <0>
- //
- // We'll populate it within the witness, as since this
- // was a "contest" resolver, we didn't yet know of the
- // preimage.
- case !isTaproot:
- h.htlcResolution.SignedSuccessTx.TxIn[0].Witness[3] = preimage[:]
- }
-
- return nil
- }
-
// Define a closure to process htlc resolutions either directly or
// triggered by future notifications.
processHtlcResolution := func(e invoices.HtlcResolution) (
@@ -249,7 +230,7 @@ func (h *htlcIncomingContestResolver) Resolve(
// If the htlc resolution was a settle, apply the
// preimage and return a success resolver.
case *invoices.HtlcSettleResolution:
- err := applyPreimage(resolution.Preimage)
+ err := h.applyPreimage(resolution.Preimage)
if err != nil {
return nil, err
}
@@ -264,7 +245,7 @@ func (h *htlcIncomingContestResolver) Resolve(
h.htlcResolution.ClaimOutpoint,
h.htlcExpiry, currentHeight)
- h.resolved = true
+ h.resolved.Store(true)
if err := h.processFinalHtlcFail(); err != nil {
return nil, err
@@ -314,6 +295,9 @@ func (h *htlcIncomingContestResolver) Resolve(
return nil, err
}
+ h.log.Debugf("received resolution from registry: %v",
+ resolution)
+
defer func() {
h.Registry.HodlUnsubscribeAll(hodlQueue.ChanIn())
@@ -371,7 +355,9 @@ func (h *htlcIncomingContestResolver) Resolve(
// However, we don't know how to ourselves, so we'll
// return our inner resolver which has the knowledge to
// do so.
- if err := applyPreimage(preimage); err != nil {
+ h.log.Debugf("Found preimage for htlc=%x", h.htlc.RHash)
+
+ if err := h.applyPreimage(preimage); err != nil {
return nil, err
}
@@ -390,7 +376,10 @@ func (h *htlcIncomingContestResolver) Resolve(
continue
}
- if err := applyPreimage(preimage); err != nil {
+ h.log.Debugf("Received preimage for htlc=%x",
+ h.htlc.RHash)
+
+ if err := h.applyPreimage(preimage); err != nil {
return nil, err
}
@@ -417,7 +406,7 @@ func (h *htlcIncomingContestResolver) Resolve(
"(expiry=%v, height=%v), abandoning", h,
h.htlcResolution.ClaimOutpoint,
h.htlcExpiry, currentHeight)
- h.resolved = true
+ h.resolved.Store(true)
if err := h.processFinalHtlcFail(); err != nil {
return nil, err
@@ -437,6 +426,76 @@ func (h *htlcIncomingContestResolver) Resolve(
}
}
+// applyPreimage is a helper function that will populate our internal resolver
+// with the preimage we learn of. This should be called once the preimage is
+// revealed so the inner resolver can properly complete its duties. The error
+// return value indicates whether the preimage was properly applied.
+func (h *htlcIncomingContestResolver) applyPreimage(
+ preimage lntypes.Preimage) error {
+
+ // Sanity check to see if this preimage matches our htlc. At this point
+ // it should never happen that it does not match.
+ if !preimage.Matches(h.htlc.RHash) {
+ return errors.New("preimage does not match hash")
+ }
+
+ // We may already have the preimage since both the `Launch` and
+ // `Resolve` methods will look for it.
+ if h.htlcResolution.Preimage != lntypes.ZeroHash {
+ h.log.Debugf("already applied preimage for htlc=%x",
+ h.htlc.RHash)
+
+ return nil
+ }
+
+ // Update htlcResolution with the matching preimage.
+ h.htlcResolution.Preimage = preimage
+
+ log.Infof("%T(%v): applied preimage=%v", h,
+ h.htlcResolution.ClaimOutpoint, preimage)
+
+ isSecondLevel := h.htlcResolution.SignedSuccessTx != nil
+
+ // If we didn't have to go to the second level to claim (this
+ // is the remote commitment transaction), then we don't need to
+ // modify our canned witness.
+ if !isSecondLevel {
+ return nil
+ }
+
+ isTaproot := txscript.IsPayToTaproot(
+ h.htlcResolution.SignedSuccessTx.TxOut[0].PkScript,
+ )
+
+ // If this is our commitment transaction, then we'll need to
+ // populate the witness for the second-level HTLC transaction.
+ switch {
+ // For taproot channels, the witness for sweeping with success
+ // looks like:
+ // -
+ //
+ //
+ // So we'll insert it at the 3rd index of the witness.
+ case isTaproot:
+ //nolint:lll
+ h.htlcResolution.SignedSuccessTx.TxIn[0].Witness[2] = preimage[:]
+
+ // Within the witness for the success transaction, the
+ // preimage is the 4th element as it looks like:
+ //
+ // * <0>
+ //
+ // We'll populate it within the witness, as since this
+ // was a "contest" resolver, we didn't yet know of the
+ // preimage.
+ case !isTaproot:
+ //nolint:lll
+ h.htlcResolution.SignedSuccessTx.TxIn[0].Witness[3] = preimage[:]
+ }
+
+ return nil
+}
+
// report returns a report on the resolution state of the contract.
func (h *htlcIncomingContestResolver) report() *ContractReport {
// No locking needed as these values are read-only.
@@ -463,17 +522,11 @@ func (h *htlcIncomingContestResolver) report() *ContractReport {
//
// NOTE: Part of the ContractResolver interface.
func (h *htlcIncomingContestResolver) Stop() {
+ h.log.Debugf("stopping...")
+ defer h.log.Debugf("stopped")
close(h.quit)
}
-// IsResolved returns true if the stored state in the resolve is fully
-// resolved. In this case the target output can be forgotten.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcIncomingContestResolver) IsResolved() bool {
- return h.resolved
-}
-
// Encode writes an encoded version of the ContractResolver into the passed
// Writer.
//
@@ -562,3 +615,77 @@ func (h *htlcIncomingContestResolver) decodePayload() (*hop.Payload,
// A compile time assertion to ensure htlcIncomingContestResolver meets the
// ContractResolver interface.
var _ htlcContractResolver = (*htlcIncomingContestResolver)(nil)
+
+// findAndapplyPreimage attempts to find the preimage for the incoming HTLC. If
+// found, it will be applied.
+func (h *htlcIncomingContestResolver) findAndapplyPreimage() (bool, error) {
+ // Query to see if we already know the preimage.
+ preimage, ok := h.PreimageDB.LookupPreimage(h.htlc.RHash)
+
+ // If the preimage is known, we'll apply it.
+ if ok {
+ if err := h.applyPreimage(preimage); err != nil {
+ return false, err
+ }
+
+ // Successfully applied the preimage, we can now return.
+ return true, nil
+ }
+
+ // First try to parse the payload.
+ payload, _, err := h.decodePayload()
+ if err != nil {
+ h.log.Errorf("Cannot decode payload of htlc %v", h.HtlcPoint())
+
+ // If we cannot decode the payload, we will return a nil error
+ // and let it to be handled in `Resolve`.
+ return false, nil
+ }
+
+ // Exit early if this is not the exit hop, which means we are not the
+ // payment receiver and don't have preimage.
+ if payload.FwdInfo.NextHop != hop.Exit {
+ return false, nil
+ }
+
+ // Notify registry that we are potentially resolving as an exit hop
+ // on-chain. If this HTLC indeed pays to an existing invoice, the
+ // invoice registry will tell us what to do with the HTLC. This is
+ // identical to HTLC resolution in the link.
+ circuitKey := models.CircuitKey{
+ ChanID: h.ShortChanID,
+ HtlcID: h.htlc.HtlcIndex,
+ }
+
+ // Try get the resolution - if it doesn't give us a resolution
+ // immediately, we'll assume we don't know it yet and let the `Resolve`
+ // handle the waiting.
+ //
+ // NOTE: we use a nil subscriber here and a zero current height as we
+ // are only interested in the settle resolution.
+ //
+ // TODO(yy): move this logic to link and let the preimage be accessed
+ // via the preimage beacon.
+ resolution, err := h.Registry.NotifyExitHopHtlc(
+ h.htlc.RHash, h.htlc.Amt, h.htlcExpiry, 0,
+ circuitKey, nil, nil, payload,
+ )
+ if err != nil {
+ return false, err
+ }
+
+ res, ok := resolution.(*invoices.HtlcSettleResolution)
+
+ // Exit early if it's not a settle resolution.
+ if !ok {
+ return false, nil
+ }
+
+ // Otherwise we have a settle resolution, apply the preimage.
+ err = h.applyPreimage(res.Preimage)
+ if err != nil {
+ return false, err
+ }
+
+ return true, nil
+}
diff --git a/contractcourt/htlc_incoming_contest_resolver_test.go b/contractcourt/htlc_incoming_contest_resolver_test.go
index 55d93a6fb3..9b3204f2ee 100644
--- a/contractcourt/htlc_incoming_contest_resolver_test.go
+++ b/contractcourt/htlc_incoming_contest_resolver_test.go
@@ -5,11 +5,13 @@ import (
"io"
"testing"
+ "github.com/btcsuite/btcd/wire"
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/models"
"github.com/lightningnetwork/lnd/htlcswitch/hop"
+ "github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/invoices"
"github.com/lightningnetwork/lnd/kvdb"
"github.com/lightningnetwork/lnd/lnmock"
@@ -356,6 +358,7 @@ func newIncomingResolverTestContext(t *testing.T, isExit bool) *incomingResolver
return nil
},
+ Sweeper: newMockSweeper(),
},
PutResolverReport: func(_ kvdb.RwTx,
_ *channeldb.ResolverReport) error {
@@ -374,10 +377,16 @@ func newIncomingResolverTestContext(t *testing.T, isExit bool) *incomingResolver
},
}
+ res := lnwallet.IncomingHtlcResolution{
+ SweepSignDesc: input.SignDescriptor{
+ Output: &wire.TxOut{},
+ },
+ }
+
c.resolver = &htlcIncomingContestResolver{
htlcSuccessResolver: &htlcSuccessResolver{
contractResolverKit: *newContractResolverKit(cfg),
- htlcResolution: lnwallet.IncomingHtlcResolution{},
+ htlcResolution: res,
htlc: channeldb.HTLC{
Amt: lnwire.MilliSatoshi(testHtlcAmount),
RHash: testResHash,
@@ -386,6 +395,7 @@ func newIncomingResolverTestContext(t *testing.T, isExit bool) *incomingResolver
},
htlcExpiry: testHtlcExpiry,
}
+ c.resolver.initLogger("htlcIncomingContestResolver")
return c
}
@@ -395,7 +405,11 @@ func (i *incomingResolverTestContext) resolve() {
i.resolveErr = make(chan error, 1)
go func() {
var err error
- i.nextResolver, err = i.resolver.Resolve(false)
+
+ err = i.resolver.Launch()
+ require.NoError(i.t, err)
+
+ i.nextResolver, err = i.resolver.Resolve()
i.resolveErr <- err
}()
diff --git a/contractcourt/htlc_lease_resolver.go b/contractcourt/htlc_lease_resolver.go
index 53fa893553..c904f21d1b 100644
--- a/contractcourt/htlc_lease_resolver.go
+++ b/contractcourt/htlc_lease_resolver.go
@@ -57,10 +57,10 @@ func (h *htlcLeaseResolver) makeSweepInput(op *wire.OutPoint,
signDesc *input.SignDescriptor, csvDelay, broadcastHeight uint32,
payHash [32]byte, resBlob fn.Option[tlv.Blob]) *input.BaseInput {
- if h.hasCLTV() {
- log.Infof("%T(%x): CSV and CLTV locks expired, offering "+
- "second-layer output to sweeper: %v", h, payHash, op)
+ log.Infof("%T(%x): offering second-layer output to sweeper: %v", h,
+ payHash, op)
+ if h.hasCLTV() {
return input.NewCsvInputWithCltv(
op, cltvWtype, signDesc,
broadcastHeight, csvDelay,
diff --git a/contractcourt/htlc_outgoing_contest_resolver.go b/contractcourt/htlc_outgoing_contest_resolver.go
index 2466544c98..2e458b8224 100644
--- a/contractcourt/htlc_outgoing_contest_resolver.go
+++ b/contractcourt/htlc_outgoing_contest_resolver.go
@@ -1,7 +1,6 @@
package contractcourt
import (
- "fmt"
"io"
"github.com/btcsuite/btcd/btcutil"
@@ -36,6 +35,47 @@ func newOutgoingContestResolver(res lnwallet.OutgoingHtlcResolution,
}
}
+// Launch will call the inner resolver's launch method if the expiry height has
+// been reached, otherwise it's a no-op.
+func (h *htlcOutgoingContestResolver) Launch() error {
+ // NOTE: we don't mark this resolver as launched as the inner resolver
+ // will set it when it's launched.
+ if h.launched.Load() {
+ h.log.Tracef("already launched")
+ return nil
+ }
+
+ // If the HTLC has custom records, then for now we'll pause resolution.
+ //
+ // TODO(roasbeef): Implement resolving HTLCs with custom records
+ // (follow-up PR).
+ if len(h.htlc.CustomRecords) != 0 {
+ select { //nolint:gosimple
+ case <-h.quit:
+ return errResolverShuttingDown
+ }
+ }
+
+ h.log.Debugf("launching contest resolver...")
+
+ _, bestHeight, err := h.ChainIO.GetBestBlock()
+ if err != nil {
+ return err
+ }
+
+ if uint32(bestHeight) < h.htlcResolution.Expiry {
+ return nil
+ }
+
+ // If the current height is >= expiry, then a timeout path spend will
+ // be valid to be included in the next block, and we can immediately
+ // return the resolver.
+ h.log.Infof("expired (height=%v, expiry=%v), launching timeout "+
+ "resolver", bestHeight, h.htlcResolution.Expiry)
+
+ return h.htlcTimeoutResolver.Launch()
+}
+
// Resolve commences the resolution of this contract. As this contract hasn't
// yet timed out, we'll wait for one of two things to happen
//
@@ -49,12 +89,11 @@ func newOutgoingContestResolver(res lnwallet.OutgoingHtlcResolution,
// When either of these two things happens, we'll create a new resolver which
// is able to handle the final resolution of the contract. We're only the pivot
// point.
-func (h *htlcOutgoingContestResolver) Resolve(
- _ bool) (ContractResolver, error) {
-
+func (h *htlcOutgoingContestResolver) Resolve() (ContractResolver, error) {
// If we're already full resolved, then we don't have anything further
// to do.
- if h.resolved {
+ if h.IsResolved() {
+ h.log.Errorf("already resolved")
return nil, nil
}
@@ -88,8 +127,7 @@ func (h *htlcOutgoingContestResolver) Resolve(
return nil, errResolverShuttingDown
}
- // TODO(roasbeef): Checkpoint?
- return h.claimCleanUp(commitSpend)
+ return nil, h.claimCleanUp(commitSpend)
// If it hasn't, then we'll watch for both the expiration, and the
// sweeping out this output.
@@ -126,12 +164,18 @@ func (h *htlcOutgoingContestResolver) Resolve(
// finalized` will be returned and the broadcast will
// fail.
newHeight := uint32(newBlock.Height)
- if newHeight >= h.htlcResolution.Expiry {
- log.Infof("%T(%v): HTLC has expired "+
+ expiry := h.htlcResolution.Expiry
+ if h.isZeroFeeOutput() {
+ expiry--
+ }
+
+ if newHeight >= expiry {
+ log.Infof("%T(%v): HTLC about to expire "+
"(height=%v, expiry=%v), transforming "+
"into timeout resolver", h,
h.htlcResolution.ClaimOutpoint,
newHeight, h.htlcResolution.Expiry)
+
return h.htlcTimeoutResolver, nil
}
@@ -146,10 +190,10 @@ func (h *htlcOutgoingContestResolver) Resolve(
// party is by revealing the preimage. So we'll perform
// our duties to clean up the contract once it has been
// claimed.
- return h.claimCleanUp(commitSpend)
+ return nil, h.claimCleanUp(commitSpend)
case <-h.quit:
- return nil, fmt.Errorf("resolver canceled")
+ return nil, errResolverShuttingDown
}
}
}
@@ -180,17 +224,11 @@ func (h *htlcOutgoingContestResolver) report() *ContractReport {
//
// NOTE: Part of the ContractResolver interface.
func (h *htlcOutgoingContestResolver) Stop() {
+ h.log.Debugf("stopping...")
+ defer h.log.Debugf("stopped")
close(h.quit)
}
-// IsResolved returns true if the stored state in the resolve is fully
-// resolved. In this case the target output can be forgotten.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcOutgoingContestResolver) IsResolved() bool {
- return h.resolved
-}
-
// Encode writes an encoded version of the ContractResolver into the passed
// Writer.
//
diff --git a/contractcourt/htlc_outgoing_contest_resolver_test.go b/contractcourt/htlc_outgoing_contest_resolver_test.go
index f67c34ff4e..18b4486c5e 100644
--- a/contractcourt/htlc_outgoing_contest_resolver_test.go
+++ b/contractcourt/htlc_outgoing_contest_resolver_test.go
@@ -15,6 +15,7 @@ import (
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwire"
+ "github.com/stretchr/testify/require"
)
const (
@@ -159,6 +160,7 @@ func newOutgoingResolverTestContext(t *testing.T) *outgoingResolverTestContext {
return nil
},
+ ChainIO: &mock.ChainIO{},
},
PutResolverReport: func(_ kvdb.RwTx,
_ *channeldb.ResolverReport) error {
@@ -195,6 +197,7 @@ func newOutgoingResolverTestContext(t *testing.T) *outgoingResolverTestContext {
},
},
}
+ resolver.initLogger("htlcOutgoingContestResolver")
return &outgoingResolverTestContext{
resolver: resolver,
@@ -209,7 +212,10 @@ func (i *outgoingResolverTestContext) resolve() {
// Start resolver.
i.resolverResultChan = make(chan resolveResult, 1)
go func() {
- nextResolver, err := i.resolver.Resolve(false)
+ err := i.resolver.Launch()
+ require.NoError(i.t, err)
+
+ nextResolver, err := i.resolver.Resolve()
i.resolverResultChan <- resolveResult{
nextResolver: nextResolver,
err: err,
diff --git a/contractcourt/htlc_success_resolver.go b/contractcourt/htlc_success_resolver.go
index 4c9d2b200b..348c47769e 100644
--- a/contractcourt/htlc_success_resolver.go
+++ b/contractcourt/htlc_success_resolver.go
@@ -2,6 +2,7 @@ package contractcourt
import (
"encoding/binary"
+ "fmt"
"io"
"sync"
@@ -9,8 +10,6 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
- "github.com/davecgh/go-spew/spew"
- "github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/models"
"github.com/lightningnetwork/lnd/fn"
@@ -43,9 +42,6 @@ type htlcSuccessResolver struct {
// second-level output (true).
outputIncubating bool
- // resolved reflects if the contract has been fully resolved or not.
- resolved bool
-
// broadcastHeight is the height that the original contract was
// broadcast to the main-chain at. We'll use this value to bound any
// historical queries to the chain for spends/confirmations.
@@ -81,27 +77,30 @@ func newSuccessResolver(res lnwallet.IncomingHtlcResolution,
}
h.initReport()
+ h.initLogger(fmt.Sprintf("%T(%v)", h, h.outpoint()))
return h
}
-// ResolverKey returns an identifier which should be globally unique for this
-// particular resolver within the chain the original contract resides within.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcSuccessResolver) ResolverKey() []byte {
+// outpoint returns the outpoint of the HTLC output we're attempting to sweep.
+func (h *htlcSuccessResolver) outpoint() wire.OutPoint {
// The primary key for this resolver will be the outpoint of the HTLC
// on the commitment transaction itself. If this is our commitment,
// then the output can be found within the signed success tx,
// otherwise, it's just the ClaimOutpoint.
- var op wire.OutPoint
if h.htlcResolution.SignedSuccessTx != nil {
- op = h.htlcResolution.SignedSuccessTx.TxIn[0].PreviousOutPoint
- } else {
- op = h.htlcResolution.ClaimOutpoint
+ return h.htlcResolution.SignedSuccessTx.TxIn[0].PreviousOutPoint
}
- key := newResolverID(op)
+ return h.htlcResolution.ClaimOutpoint
+}
+
+// ResolverKey returns an identifier which should be globally unique for this
+// particular resolver within the chain the original contract resides within.
+//
+// NOTE: Part of the ContractResolver interface.
+func (h *htlcSuccessResolver) ResolverKey() []byte {
+ key := newResolverID(h.outpoint())
return key[:]
}
@@ -112,423 +111,77 @@ func (h *htlcSuccessResolver) ResolverKey() []byte {
// anymore. Every HTLC has already passed through the incoming contest resolver
// and in there the invoice was already marked as settled.
//
-// TODO(roasbeef): create multi to batch
-//
// NOTE: Part of the ContractResolver interface.
-func (h *htlcSuccessResolver) Resolve(
- immediate bool) (ContractResolver, error) {
-
- // If we're already resolved, then we can exit early.
- if h.resolved {
- return nil, nil
- }
-
- // If we don't have a success transaction, then this means that this is
- // an output on the remote party's commitment transaction.
- if h.htlcResolution.SignedSuccessTx == nil {
- return h.resolveRemoteCommitOutput(immediate)
- }
-
- // Otherwise this an output on our own commitment, and we must start by
- // broadcasting the second-level success transaction.
- secondLevelOutpoint, err := h.broadcastSuccessTx(immediate)
- if err != nil {
- return nil, err
- }
-
- // To wrap this up, we'll wait until the second-level transaction has
- // been spent, then fully resolve the contract.
- log.Infof("%T(%x): waiting for second-level HTLC output to be spent "+
- "after csv_delay=%v", h, h.htlc.RHash[:], h.htlcResolution.CsvDelay)
-
- spend, err := waitForSpend(
- secondLevelOutpoint,
- h.htlcResolution.SweepSignDesc.Output.PkScript,
- h.broadcastHeight, h.Notifier, h.quit,
- )
- if err != nil {
- return nil, err
- }
-
- h.reportLock.Lock()
- h.currentReport.RecoveredBalance = h.currentReport.LimboBalance
- h.currentReport.LimboBalance = 0
- h.reportLock.Unlock()
-
- h.resolved = true
- return nil, h.checkpointClaim(
- spend.SpenderTxHash, channeldb.ResolverOutcomeClaimed,
- )
-}
-
-// broadcastSuccessTx handles an HTLC output on our local commitment by
-// broadcasting the second-level success transaction. It returns the ultimate
-// outpoint of the second-level tx, that we must wait to be spent for the
-// resolver to be fully resolved.
-func (h *htlcSuccessResolver) broadcastSuccessTx(
- immediate bool) (*wire.OutPoint, error) {
-
- // If we have non-nil SignDetails, this means that have a 2nd level
- // HTLC transaction that is signed using sighash SINGLE|ANYONECANPAY
- // (the case for anchor type channels). In this case we can re-sign it
- // and attach fees at will. We let the sweeper handle this job. We use
- // the checkpointed outputIncubating field to determine if we already
- // swept the HTLC output into the second level transaction.
- if h.htlcResolution.SignDetails != nil {
- return h.broadcastReSignedSuccessTx(immediate)
- }
-
- // Otherwise we'll publish the second-level transaction directly and
- // offer the resolution to the nursery to handle.
- log.Infof("%T(%x): broadcasting second-layer transition tx: %v",
- h, h.htlc.RHash[:], spew.Sdump(h.htlcResolution.SignedSuccessTx))
-
- // We'll now broadcast the second layer transaction so we can kick off
- // the claiming process.
- //
- // TODO(roasbeef): after changing sighashes send to tx bundler
- label := labels.MakeLabel(
- labels.LabelTypeChannelClose, &h.ShortChanID,
- )
- err := h.PublishTx(h.htlcResolution.SignedSuccessTx, label)
- if err != nil {
- return nil, err
- }
-
- // Otherwise, this is an output on our commitment transaction. In this
- // case, we'll send it to the incubator, but only if we haven't already
- // done so.
- if !h.outputIncubating {
- log.Infof("%T(%x): incubating incoming htlc output",
- h, h.htlc.RHash[:])
-
- err := h.IncubateOutputs(
- h.ChanPoint, fn.None[lnwallet.OutgoingHtlcResolution](),
- fn.Some(h.htlcResolution),
- h.broadcastHeight, fn.Some(int32(h.htlc.RefundTimeout)),
- )
- if err != nil {
- return nil, err
- }
-
- h.outputIncubating = true
-
- if err := h.Checkpoint(h); err != nil {
- log.Errorf("unable to Checkpoint: %v", err)
- return nil, err
- }
- }
-
- return &h.htlcResolution.ClaimOutpoint, nil
-}
-
-// broadcastReSignedSuccessTx handles the case where we have non-nil
-// SignDetails, and offers the second level transaction to the Sweeper, that
-// will re-sign it and attach fees at will.
//
-//nolint:funlen
-func (h *htlcSuccessResolver) broadcastReSignedSuccessTx(immediate bool) (
- *wire.OutPoint, error) {
-
- // Keep track of the tx spending the HTLC output on the commitment, as
- // this will be the confirmed second-level tx we'll ultimately sweep.
- var commitSpend *chainntnfs.SpendDetail
-
- // We will have to let the sweeper re-sign the success tx and wait for
- // it to confirm, if we haven't already.
- isTaproot := txscript.IsPayToTaproot(
- h.htlcResolution.SweepSignDesc.Output.PkScript,
- )
- if !h.outputIncubating {
- var secondLevelInput input.HtlcSecondLevelAnchorInput
- if isTaproot {
- //nolint:lll
- secondLevelInput = input.MakeHtlcSecondLevelSuccessTaprootInput(
- h.htlcResolution.SignedSuccessTx,
- h.htlcResolution.SignDetails, h.htlcResolution.Preimage,
- h.broadcastHeight,
- input.WithResolutionBlob(
- h.htlcResolution.ResolutionBlob,
- ),
- )
- } else {
- //nolint:lll
- secondLevelInput = input.MakeHtlcSecondLevelSuccessAnchorInput(
- h.htlcResolution.SignedSuccessTx,
- h.htlcResolution.SignDetails, h.htlcResolution.Preimage,
- h.broadcastHeight,
- )
- }
-
- // Calculate the budget for this sweep.
- value := btcutil.Amount(
- secondLevelInput.SignDesc().Output.Value,
- )
- budget := calculateBudget(
- value, h.Budget.DeadlineHTLCRatio,
- h.Budget.DeadlineHTLC,
- )
-
- // The deadline would be the CLTV in this HTLC output. If we
- // are the initiator of this force close, with the default
- // `IncomingBroadcastDelta`, it means we have 10 blocks left
- // when going onchain. Given we need to mine one block to
- // confirm the force close tx, and one more block to trigger
- // the sweep, we have 8 blocks left to sweep the HTLC.
- deadline := fn.Some(int32(h.htlc.RefundTimeout))
-
- log.Infof("%T(%x): offering second-level HTLC success tx to "+
- "sweeper with deadline=%v, budget=%v", h,
- h.htlc.RHash[:], h.htlc.RefundTimeout, budget)
-
- // We'll now offer the second-level transaction to the sweeper.
- _, err := h.Sweeper.SweepInput(
- &secondLevelInput,
- sweep.Params{
- Budget: budget,
- DeadlineHeight: deadline,
- Immediate: immediate,
- },
- )
- if err != nil {
- return nil, err
- }
-
- log.Infof("%T(%x): waiting for second-level HTLC success "+
- "transaction to confirm", h, h.htlc.RHash[:])
-
- // Wait for the second level transaction to confirm.
- commitSpend, err = waitForSpend(
- &h.htlcResolution.SignedSuccessTx.TxIn[0].PreviousOutPoint,
- h.htlcResolution.SignDetails.SignDesc.Output.PkScript,
- h.broadcastHeight, h.Notifier, h.quit,
- )
- if err != nil {
- return nil, err
- }
+// TODO(yy): refactor the interface method to return an error only.
+func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) {
+ var err error
- // Now that the second-level transaction has confirmed, we
- // checkpoint the state so we'll go to the next stage in case
- // of restarts.
- h.outputIncubating = true
- if err := h.Checkpoint(h); err != nil {
- log.Errorf("unable to Checkpoint: %v", err)
- return nil, err
- }
-
- log.Infof("%T(%x): second-level HTLC success transaction "+
- "confirmed!", h, h.htlc.RHash[:])
- }
-
- // If we ended up here after a restart, we must again get the
- // spend notification.
- if commitSpend == nil {
- var err error
- commitSpend, err = waitForSpend(
- &h.htlcResolution.SignedSuccessTx.TxIn[0].PreviousOutPoint,
- h.htlcResolution.SignDetails.SignDesc.Output.PkScript,
- h.broadcastHeight, h.Notifier, h.quit,
- )
- if err != nil {
- return nil, err
- }
- }
-
- // The HTLC success tx has a CSV lock that we must wait for, and if
- // this is a lease enforced channel and we're the imitator, we may need
- // to wait for longer.
- waitHeight := h.deriveWaitHeight(
- h.htlcResolution.CsvDelay, commitSpend,
- )
-
- // Now that the sweeper has broadcasted the second-level transaction,
- // it has confirmed, and we have checkpointed our state, we'll sweep
- // the second level output. We report the resolver has moved the next
- // stage.
- h.reportLock.Lock()
- h.currentReport.Stage = 2
- h.currentReport.MaturityHeight = waitHeight
- h.reportLock.Unlock()
-
- if h.hasCLTV() {
- log.Infof("%T(%x): waiting for CSV and CLTV lock to "+
- "expire at height %v", h, h.htlc.RHash[:],
- waitHeight)
- } else {
- log.Infof("%T(%x): waiting for CSV lock to expire at "+
- "height %v", h, h.htlc.RHash[:], waitHeight)
- }
-
- // Deduct one block so this input is offered to the sweeper one block
- // earlier since the sweeper will wait for one block to trigger the
- // sweeping.
+ // If the HTLC has custom records, then for now we'll pause resolution.
//
- // TODO(yy): this is done so the outputs can be aggregated
- // properly. Suppose CSV locks of five 2nd-level outputs all
- // expire at height 840000, there is a race in block digestion
- // between contractcourt and sweeper:
- // - G1: block 840000 received in contractcourt, it now offers
- // the outputs to the sweeper.
- // - G2: block 840000 received in sweeper, it now starts to
- // sweep the received outputs - there's no guarantee all
- // fives have been received.
- // To solve this, we either offer the outputs earlier, or
- // implement `blockbeat`, and force contractcourt and sweeper
- // to consume each block sequentially.
- waitHeight--
-
- // TODO(yy): let sweeper handles the wait?
- err := waitForHeight(waitHeight, h.Notifier, h.quit)
- if err != nil {
- return nil, err
- }
-
- // We'll use this input index to determine the second-level output
- // index on the transaction, as the signatures requires the indexes to
- // be the same. We don't look for the second-level output script
- // directly, as there might be more than one HTLC output to the same
- // pkScript.
- op := &wire.OutPoint{
- Hash: *commitSpend.SpenderTxHash,
- Index: commitSpend.SpenderInputIndex,
- }
-
- // Let the sweeper sweep the second-level output now that the
- // CSV/CLTV locks have expired.
- var witType input.StandardWitnessType
- if isTaproot {
- witType = input.TaprootHtlcAcceptedSuccessSecondLevel
- } else {
- witType = input.HtlcAcceptedSuccessSecondLevel
+ // TODO(roasbeef): Implement resolving HTLCs with custom records
+ // (follow-up PR).
+ if len(h.htlc.CustomRecords) != 0 {
+ select { //nolint:gosimple
+ case <-h.quit:
+ return nil, errResolverShuttingDown
+ }
}
- inp := h.makeSweepInput(
- op, witType,
- input.LeaseHtlcAcceptedSuccessSecondLevel,
- &h.htlcResolution.SweepSignDesc,
- h.htlcResolution.CsvDelay, uint32(commitSpend.SpendingHeight),
- h.htlc.RHash, h.htlcResolution.ResolutionBlob,
- )
- // Calculate the budget for this sweep.
- budget := calculateBudget(
- btcutil.Amount(inp.SignDesc().Output.Value),
- h.Budget.NoDeadlineHTLCRatio,
- h.Budget.NoDeadlineHTLC,
- )
+ switch {
+ // If we're already resolved, then we can exit early.
+ case h.IsResolved():
+ h.log.Errorf("already resolved")
- log.Infof("%T(%x): offering second-level success tx output to sweeper "+
- "with no deadline and budget=%v at height=%v", h,
- h.htlc.RHash[:], budget, waitHeight)
+ // If this is an output on the remote party's commitment transaction,
+ // use the direct-spend path to sweep the htlc.
+ case h.isRemoteCommitOutput():
+ err = h.resolveRemoteCommitOutput()
- // TODO(roasbeef): need to update above for leased types
- _, err = h.Sweeper.SweepInput(
- inp,
- sweep.Params{
- Budget: budget,
+ // If this is an output on our commitment transaction using post-anchor
+ // channel type, it will be handled by the sweeper.
+ case h.isZeroFeeOutput():
+ err = h.resolveSuccessTx()
- // For second level success tx, there's no rush to get
- // it confirmed, so we use a nil deadline.
- DeadlineHeight: fn.None[int32](),
- },
- )
- if err != nil {
- return nil, err
+ // If this is an output on our own commitment using pre-anchor channel
+ // type, we will publish the success tx and offer the output to the
+ // nursery.
+ default:
+ err = h.resolveLegacySuccessTx()
}
- // Will return this outpoint, when this is spent the resolver is fully
- // resolved.
- return op, nil
+ return nil, err
}
// resolveRemoteCommitOutput handles sweeping an HTLC output on the remote
// commitment with the preimage. In this case we can sweep the output directly,
// and don't have to broadcast a second-level transaction.
-func (h *htlcSuccessResolver) resolveRemoteCommitOutput(immediate bool) (
- ContractResolver, error) {
-
- isTaproot := txscript.IsPayToTaproot(
- h.htlcResolution.SweepSignDesc.Output.PkScript,
- )
-
- // Before we can craft out sweeping transaction, we need to
- // create an input which contains all the items required to add
- // this input to a sweeping transaction, and generate a
- // witness.
- var inp input.Input
- if isTaproot {
- inp = lnutils.Ptr(input.MakeTaprootHtlcSucceedInput(
- &h.htlcResolution.ClaimOutpoint,
- &h.htlcResolution.SweepSignDesc,
- h.htlcResolution.Preimage[:],
- h.broadcastHeight,
- h.htlcResolution.CsvDelay,
- input.WithResolutionBlob(
- h.htlcResolution.ResolutionBlob,
- ),
- ))
- } else {
- inp = lnutils.Ptr(input.MakeHtlcSucceedInput(
- &h.htlcResolution.ClaimOutpoint,
- &h.htlcResolution.SweepSignDesc,
- h.htlcResolution.Preimage[:],
- h.broadcastHeight,
- h.htlcResolution.CsvDelay,
- ))
- }
-
- // Calculate the budget for this sweep.
- budget := calculateBudget(
- btcutil.Amount(inp.SignDesc().Output.Value),
- h.Budget.DeadlineHTLCRatio,
- h.Budget.DeadlineHTLC,
- )
-
- deadline := fn.Some(int32(h.htlc.RefundTimeout))
-
- log.Infof("%T(%x): offering direct-preimage HTLC output to sweeper "+
- "with deadline=%v, budget=%v", h, h.htlc.RHash[:],
- h.htlc.RefundTimeout, budget)
-
- // We'll now offer the direct preimage HTLC to the sweeper.
- _, err := h.Sweeper.SweepInput(
- inp,
- sweep.Params{
- Budget: budget,
- DeadlineHeight: deadline,
- Immediate: immediate,
- },
- )
- if err != nil {
- return nil, err
- }
+func (h *htlcSuccessResolver) resolveRemoteCommitOutput() error {
+ h.log.Info("waiting for direct-preimage spend of the htlc to confirm")
// Wait for the direct-preimage HTLC sweep tx to confirm.
+ //
+ // TODO(yy): use the result chan returned from `SweepInput`.
sweepTxDetails, err := waitForSpend(
&h.htlcResolution.ClaimOutpoint,
h.htlcResolution.SweepSignDesc.Output.PkScript,
h.broadcastHeight, h.Notifier, h.quit,
)
if err != nil {
- return nil, err
+ return err
}
- // Once the transaction has received a sufficient number of
- // confirmations, we'll mark ourselves as fully resolved and exit.
- h.resolved = true
+ // TODO(yy): should also update the `RecoveredBalance` and
+ // `LimboBalance` like other paths?
// Checkpoint the resolver, and write the outcome to disk.
- return nil, h.checkpointClaim(
- sweepTxDetails.SpenderTxHash,
- channeldb.ResolverOutcomeClaimed,
- )
+ return h.checkpointClaim(sweepTxDetails.SpenderTxHash)
}
// checkpointClaim checkpoints the success resolver with the reports it needs.
// If this htlc was claimed two stages, it will write reports for both stages,
// otherwise it will just write for the single htlc claim.
-func (h *htlcSuccessResolver) checkpointClaim(spendTx *chainhash.Hash,
- outcome channeldb.ResolverOutcome) error {
-
+func (h *htlcSuccessResolver) checkpointClaim(spendTx *chainhash.Hash) error {
// Mark the htlc as final settled.
err := h.ChainArbitratorConfig.PutFinalHtlcOutcome(
h.ChannelArbitratorConfig.ShortChanID, h.htlc.HtlcIndex, true,
@@ -556,7 +209,7 @@ func (h *htlcSuccessResolver) checkpointClaim(spendTx *chainhash.Hash,
OutPoint: h.htlcResolution.ClaimOutpoint,
Amount: amt,
ResolverType: channeldb.ResolverTypeIncomingHtlc,
- ResolverOutcome: outcome,
+ ResolverOutcome: channeldb.ResolverOutcomeClaimed,
SpendTxID: spendTx,
},
}
@@ -581,6 +234,7 @@ func (h *htlcSuccessResolver) checkpointClaim(spendTx *chainhash.Hash,
}
// Finally, we checkpoint the resolver with our report(s).
+ h.resolved.Store(true)
return h.Checkpoint(h, reports...)
}
@@ -589,15 +243,10 @@ func (h *htlcSuccessResolver) checkpointClaim(spendTx *chainhash.Hash,
//
// NOTE: Part of the ContractResolver interface.
func (h *htlcSuccessResolver) Stop() {
- close(h.quit)
-}
+ h.log.Debugf("stopping...")
+ defer h.log.Debugf("stopped")
-// IsResolved returns true if the stored state in the resolve is fully
-// resolved. In this case the target output can be forgotten.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcSuccessResolver) IsResolved() bool {
- return h.resolved
+ close(h.quit)
}
// report returns a report on the resolution state of the contract.
@@ -649,7 +298,7 @@ func (h *htlcSuccessResolver) Encode(w io.Writer) error {
if err := binary.Write(w, endian, h.outputIncubating); err != nil {
return err
}
- if err := binary.Write(w, endian, h.resolved); err != nil {
+ if err := binary.Write(w, endian, h.IsResolved()); err != nil {
return err
}
if err := binary.Write(w, endian, h.broadcastHeight); err != nil {
@@ -688,9 +337,13 @@ func newSuccessResolverFromReader(r io.Reader, resCfg ResolverConfig) (
if err := binary.Read(r, endian, &h.outputIncubating); err != nil {
return nil, err
}
- if err := binary.Read(r, endian, &h.resolved); err != nil {
+
+ var resolved bool
+ if err := binary.Read(r, endian, &resolved); err != nil {
return nil, err
}
+ h.resolved.Store(resolved)
+
if err := binary.Read(r, endian, &h.broadcastHeight); err != nil {
return nil, err
}
@@ -709,6 +362,7 @@ func newSuccessResolverFromReader(r io.Reader, resCfg ResolverConfig) (
}
h.initReport()
+ h.initLogger(fmt.Sprintf("%T(%v)", h, h.outpoint()))
return h, nil
}
@@ -737,3 +391,402 @@ func (h *htlcSuccessResolver) SupplementDeadline(_ fn.Option[int32]) {
// A compile time assertion to ensure htlcSuccessResolver meets the
// ContractResolver interface.
var _ htlcContractResolver = (*htlcSuccessResolver)(nil)
+
+// isRemoteCommitOutput returns a bool to indicate whether the htlc output is
+// on the remote commitment.
+func (h *htlcSuccessResolver) isRemoteCommitOutput() bool {
+ // If we don't have a success transaction, then this means that this is
+ // an output on the remote party's commitment transaction.
+ return h.htlcResolution.SignedSuccessTx == nil
+}
+
+// isZeroFeeOutput returns a boolean indicating whether the htlc output is from
+// a anchor-enabled channel, which uses the sighash SINGLE|ANYONECANPAY.
+func (h *htlcSuccessResolver) isZeroFeeOutput() bool {
+ // If we have non-nil SignDetails, this means it has a 2nd level HTLC
+ // transaction that is signed using sighash SINGLE|ANYONECANPAY (the
+ // case for anchor type channels). In this case we can re-sign it and
+ // attach fees at will.
+ return h.htlcResolution.SignedSuccessTx != nil &&
+ h.htlcResolution.SignDetails != nil
+}
+
+// isTaproot returns true if the resolver is for a taproot output.
+func (h *htlcSuccessResolver) isTaproot() bool {
+ return txscript.IsPayToTaproot(
+ h.htlcResolution.SweepSignDesc.Output.PkScript,
+ )
+}
+
+// sweepRemoteCommitOutput creates a sweep request to sweep the HTLC output on
+// the remote commitment via the direct preimage-spend.
+func (h *htlcSuccessResolver) sweepRemoteCommitOutput() error {
+ // Before we can craft out sweeping transaction, we need to create an
+ // input which contains all the items required to add this input to a
+ // sweeping transaction, and generate a witness.
+ var inp input.Input
+
+ if h.isTaproot() {
+ inp = lnutils.Ptr(input.MakeTaprootHtlcSucceedInput(
+ &h.htlcResolution.ClaimOutpoint,
+ &h.htlcResolution.SweepSignDesc,
+ h.htlcResolution.Preimage[:],
+ h.broadcastHeight,
+ h.htlcResolution.CsvDelay,
+ input.WithResolutionBlob(
+ h.htlcResolution.ResolutionBlob,
+ ),
+ ))
+ } else {
+ inp = lnutils.Ptr(input.MakeHtlcSucceedInput(
+ &h.htlcResolution.ClaimOutpoint,
+ &h.htlcResolution.SweepSignDesc,
+ h.htlcResolution.Preimage[:],
+ h.broadcastHeight,
+ h.htlcResolution.CsvDelay,
+ ))
+ }
+
+ // Calculate the budget for this sweep.
+ budget := calculateBudget(
+ btcutil.Amount(inp.SignDesc().Output.Value),
+ h.Budget.DeadlineHTLCRatio,
+ h.Budget.DeadlineHTLC,
+ )
+
+ deadline := fn.Some(int32(h.htlc.RefundTimeout))
+
+ log.Infof("%T(%x): offering direct-preimage HTLC output to sweeper "+
+ "with deadline=%v, budget=%v", h, h.htlc.RHash[:],
+ h.htlc.RefundTimeout, budget)
+
+ // We'll now offer the direct preimage HTLC to the sweeper.
+ _, err := h.Sweeper.SweepInput(
+ inp,
+ sweep.Params{
+ Budget: budget,
+ DeadlineHeight: deadline,
+ },
+ )
+
+ return err
+}
+
+// sweepSuccessTx attempts to sweep the second level success tx.
+func (h *htlcSuccessResolver) sweepSuccessTx() error {
+ var secondLevelInput input.HtlcSecondLevelAnchorInput
+ if h.isTaproot() {
+ secondLevelInput = input.MakeHtlcSecondLevelSuccessTaprootInput(
+ h.htlcResolution.SignedSuccessTx,
+ h.htlcResolution.SignDetails, h.htlcResolution.Preimage,
+ h.broadcastHeight, input.WithResolutionBlob(
+ h.htlcResolution.ResolutionBlob,
+ ),
+ )
+ } else {
+ secondLevelInput = input.MakeHtlcSecondLevelSuccessAnchorInput(
+ h.htlcResolution.SignedSuccessTx,
+ h.htlcResolution.SignDetails, h.htlcResolution.Preimage,
+ h.broadcastHeight,
+ )
+ }
+
+ // Calculate the budget for this sweep.
+ value := btcutil.Amount(secondLevelInput.SignDesc().Output.Value)
+ budget := calculateBudget(
+ value, h.Budget.DeadlineHTLCRatio, h.Budget.DeadlineHTLC,
+ )
+
+ // The deadline would be the CLTV in this HTLC output. If we are the
+ // initiator of this force close, with the default
+ // `IncomingBroadcastDelta`, it means we have 10 blocks left when going
+ // onchain.
+ deadline := fn.Some(int32(h.htlc.RefundTimeout))
+
+ h.log.Infof("offering second-level HTLC success tx to sweeper with "+
+ "deadline=%v, budget=%v", h.htlc.RefundTimeout, budget)
+
+ // We'll now offer the second-level transaction to the sweeper.
+ _, err := h.Sweeper.SweepInput(
+ &secondLevelInput,
+ sweep.Params{
+ Budget: budget,
+ DeadlineHeight: deadline,
+ },
+ )
+
+ return err
+}
+
+// sweepSuccessTxOutput attempts to sweep the output of the second level
+// success tx.
+func (h *htlcSuccessResolver) sweepSuccessTxOutput() error {
+ h.log.Debugf("sweeping output %v from 2nd-level HTLC success tx",
+ h.htlcResolution.ClaimOutpoint)
+
+ // This should be non-blocking as we will only attempt to sweep the
+ // output when the second level tx has already been confirmed. In other
+ // words, waitForSpend will return immediately.
+ commitSpend, err := waitForSpend(
+ &h.htlcResolution.SignedSuccessTx.TxIn[0].PreviousOutPoint,
+ h.htlcResolution.SignDetails.SignDesc.Output.PkScript,
+ h.broadcastHeight, h.Notifier, h.quit,
+ )
+ if err != nil {
+ return err
+ }
+
+ // The HTLC success tx has a CSV lock that we must wait for, and if
+ // this is a lease enforced channel and we're the imitator, we may need
+ // to wait for longer.
+ waitHeight := h.deriveWaitHeight(h.htlcResolution.CsvDelay, commitSpend)
+
+ // Now that the sweeper has broadcasted the second-level transaction,
+ // it has confirmed, and we have checkpointed our state, we'll sweep
+ // the second level output. We report the resolver has moved the next
+ // stage.
+ h.reportLock.Lock()
+ h.currentReport.Stage = 2
+ h.currentReport.MaturityHeight = waitHeight
+ h.reportLock.Unlock()
+
+ if h.hasCLTV() {
+ log.Infof("%T(%x): waiting for CSV and CLTV lock to expire at "+
+ "height %v", h, h.htlc.RHash[:], waitHeight)
+ } else {
+ log.Infof("%T(%x): waiting for CSV lock to expire at height %v",
+ h, h.htlc.RHash[:], waitHeight)
+ }
+
+ // We'll use this input index to determine the second-level output
+ // index on the transaction, as the signatures requires the indexes to
+ // be the same. We don't look for the second-level output script
+ // directly, as there might be more than one HTLC output to the same
+ // pkScript.
+ op := &wire.OutPoint{
+ Hash: *commitSpend.SpenderTxHash,
+ Index: commitSpend.SpenderInputIndex,
+ }
+
+ // Let the sweeper sweep the second-level output now that the
+ // CSV/CLTV locks have expired.
+ var witType input.StandardWitnessType
+ if h.isTaproot() {
+ witType = input.TaprootHtlcAcceptedSuccessSecondLevel
+ } else {
+ witType = input.HtlcAcceptedSuccessSecondLevel
+ }
+ inp := h.makeSweepInput(
+ op, witType,
+ input.LeaseHtlcAcceptedSuccessSecondLevel,
+ &h.htlcResolution.SweepSignDesc,
+ h.htlcResolution.CsvDelay, uint32(commitSpend.SpendingHeight),
+ h.htlc.RHash, h.htlcResolution.ResolutionBlob,
+ )
+
+ // Calculate the budget for this sweep.
+ budget := calculateBudget(
+ btcutil.Amount(inp.SignDesc().Output.Value),
+ h.Budget.NoDeadlineHTLCRatio,
+ h.Budget.NoDeadlineHTLC,
+ )
+
+ log.Infof("%T(%x): offering second-level success tx output to sweeper "+
+ "with no deadline and budget=%v at height=%v", h,
+ h.htlc.RHash[:], budget, waitHeight)
+
+ // TODO(yy): use the result chan returned from SweepInput.
+ _, err = h.Sweeper.SweepInput(
+ inp,
+ sweep.Params{
+ Budget: budget,
+
+ // For second level success tx, there's no rush to get
+ // it confirmed, so we use a nil deadline.
+ DeadlineHeight: fn.None[int32](),
+ },
+ )
+
+ return err
+}
+
+// resolveLegacySuccessTx handles an HTLC output from a pre-anchor type channel
+// by broadcasting the second-level success transaction.
+func (h *htlcSuccessResolver) resolveLegacySuccessTx() error {
+ // Otherwise we'll publish the second-level transaction directly and
+ // offer the resolution to the nursery to handle.
+ h.log.Infof("broadcasting second-level success transition tx: %v",
+ h.htlcResolution.SignedSuccessTx.TxHash())
+
+ // We'll now broadcast the second layer transaction so we can kick off
+ // the claiming process.
+ //
+ // TODO(yy): offer it to the sweeper instead.
+ label := labels.MakeLabel(
+ labels.LabelTypeChannelClose, &h.ShortChanID,
+ )
+ err := h.PublishTx(h.htlcResolution.SignedSuccessTx, label)
+ if err != nil {
+ return err
+ }
+
+ // Fast-forward to resolve the output from the success tx if the it has
+ // already been sent to the UtxoNursery.
+ if h.outputIncubating {
+ return h.resolveSuccessTxOutput(h.htlcResolution.ClaimOutpoint)
+ }
+
+ h.log.Infof("incubating incoming htlc output")
+
+ // Send the output to the incubator.
+ err = h.IncubateOutputs(
+ h.ChanPoint, fn.None[lnwallet.OutgoingHtlcResolution](),
+ fn.Some(h.htlcResolution),
+ h.broadcastHeight, fn.Some(int32(h.htlc.RefundTimeout)),
+ )
+ if err != nil {
+ return err
+ }
+
+ // Mark the output as incubating and checkpoint it.
+ h.outputIncubating = true
+ if err := h.Checkpoint(h); err != nil {
+ return err
+ }
+
+ // Move to resolve the output.
+ return h.resolveSuccessTxOutput(h.htlcResolution.ClaimOutpoint)
+}
+
+// resolveSuccessTx waits for the sweeping tx of the second-level success tx to
+// confirm and offers the output from the success tx to the sweeper.
+func (h *htlcSuccessResolver) resolveSuccessTx() error {
+ h.log.Infof("waiting for 2nd-level HTLC success transaction to confirm")
+
+ // Create aliases to make the code more readable.
+ outpoint := h.htlcResolution.SignedSuccessTx.TxIn[0].PreviousOutPoint
+ pkScript := h.htlcResolution.SignDetails.SignDesc.Output.PkScript
+
+ // Wait for the second level transaction to confirm.
+ commitSpend, err := waitForSpend(
+ &outpoint, pkScript, h.broadcastHeight, h.Notifier, h.quit,
+ )
+ if err != nil {
+ return err
+ }
+
+ // We'll use this input index to determine the second-level output
+ // index on the transaction, as the signatures requires the indexes to
+ // be the same. We don't look for the second-level output script
+ // directly, as there might be more than one HTLC output to the same
+ // pkScript.
+ op := wire.OutPoint{
+ Hash: *commitSpend.SpenderTxHash,
+ Index: commitSpend.SpenderInputIndex,
+ }
+
+ // If the 2nd-stage sweeping has already been started, we can
+ // fast-forward to start the resolving process for the stage two
+ // output.
+ if h.outputIncubating {
+ return h.resolveSuccessTxOutput(op)
+ }
+
+ // Now that the second-level transaction has confirmed, we checkpoint
+ // the state so we'll go to the next stage in case of restarts.
+ h.outputIncubating = true
+ if err := h.Checkpoint(h); err != nil {
+ log.Errorf("unable to Checkpoint: %v", err)
+ return err
+ }
+
+ h.log.Infof("2nd-level HTLC success tx=%v confirmed",
+ commitSpend.SpenderTxHash)
+
+ // Send the sweep request for the output from the success tx.
+ if err := h.sweepSuccessTxOutput(); err != nil {
+ return err
+ }
+
+ return h.resolveSuccessTxOutput(op)
+}
+
+// resolveSuccessTxOutput waits for the spend of the output from the 2nd-level
+// success tx.
+func (h *htlcSuccessResolver) resolveSuccessTxOutput(op wire.OutPoint) error {
+ // To wrap this up, we'll wait until the second-level transaction has
+ // been spent, then fully resolve the contract.
+ log.Infof("%T(%x): waiting for second-level HTLC output to be spent "+
+ "after csv_delay=%v", h, h.htlc.RHash[:],
+ h.htlcResolution.CsvDelay)
+
+ spend, err := waitForSpend(
+ &op, h.htlcResolution.SweepSignDesc.Output.PkScript,
+ h.broadcastHeight, h.Notifier, h.quit,
+ )
+ if err != nil {
+ return err
+ }
+
+ h.reportLock.Lock()
+ h.currentReport.RecoveredBalance = h.currentReport.LimboBalance
+ h.currentReport.LimboBalance = 0
+ h.reportLock.Unlock()
+
+ return h.checkpointClaim(spend.SpenderTxHash)
+}
+
+// Launch creates an input based on the details of the incoming htlc resolution
+// and offers it to the sweeper.
+func (h *htlcSuccessResolver) Launch() error {
+ if h.launched.Load() {
+ h.log.Tracef("already launched")
+ return nil
+ }
+
+ h.log.Debugf("launching resolver...")
+ h.launched.Store(true)
+
+ // If the HTLC has custom records, then for now we'll pause resolution.
+ //
+ // TODO(roasbeef): Implement resolving HTLCs with custom records
+ // (follow-up PR).
+ if len(h.htlc.CustomRecords) != 0 {
+ select { //nolint:gosimple
+ case <-h.quit:
+ return nil
+ }
+ }
+
+ switch {
+ // If we're already resolved, then we can exit early.
+ case h.IsResolved():
+ h.log.Errorf("already resolved")
+ return nil
+
+ // If this is an output on the remote party's commitment transaction,
+ // use the direct-spend path.
+ case h.isRemoteCommitOutput():
+ return h.sweepRemoteCommitOutput()
+
+ // If this is an anchor type channel, we now sweep either the
+ // second-level success tx or the output from the second-level success
+ // tx.
+ case h.isZeroFeeOutput():
+ // If the second-level success tx has already been swept, we
+ // can go ahead and sweep its output.
+ if h.outputIncubating {
+ return h.sweepSuccessTxOutput()
+ }
+
+ // Otherwise, sweep the second level tx.
+ return h.sweepSuccessTx()
+
+ // If this is a legacy channel type, the output is handled by the
+ // nursery via the Resolve so we do nothing here.
+ //
+ // TODO(yy): handle the legacy output by offering it to the sweeper.
+ default:
+ return nil
+ }
+}
diff --git a/contractcourt/htlc_success_resolver_test.go b/contractcourt/htlc_success_resolver_test.go
index b9182500bb..92ebecd836 100644
--- a/contractcourt/htlc_success_resolver_test.go
+++ b/contractcourt/htlc_success_resolver_test.go
@@ -5,6 +5,7 @@ import (
"fmt"
"reflect"
"testing"
+ "time"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
@@ -20,6 +21,7 @@ import (
"github.com/lightningnetwork/lnd/lntest/mock"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwire"
+ "github.com/stretchr/testify/require"
)
var testHtlcAmt = lnwire.MilliSatoshi(200000)
@@ -39,6 +41,15 @@ type htlcResolverTestContext struct {
t *testing.T
}
+func newHtlcResolverTestContextFromReader(t *testing.T,
+ newResolver func(htlc channeldb.HTLC,
+ cfg ResolverConfig) ContractResolver) *htlcResolverTestContext {
+
+ ctx := newHtlcResolverTestContext(t, newResolver)
+
+ return ctx
+}
+
func newHtlcResolverTestContext(t *testing.T,
newResolver func(htlc channeldb.HTLC,
cfg ResolverConfig) ContractResolver) *htlcResolverTestContext {
@@ -133,8 +144,12 @@ func newHtlcResolverTestContext(t *testing.T,
func (i *htlcResolverTestContext) resolve() {
// Start resolver.
i.resolverResultChan = make(chan resolveResult, 1)
+
go func() {
- nextResolver, err := i.resolver.Resolve(false)
+ err := i.resolver.Launch()
+ require.NoError(i.t, err)
+
+ nextResolver, err := i.resolver.Resolve()
i.resolverResultChan <- resolveResult{
nextResolver: nextResolver,
err: err,
@@ -192,6 +207,7 @@ func TestHtlcSuccessSingleStage(t *testing.T) {
// sweeper.
details := &chainntnfs.SpendDetail{
SpendingTx: sweepTx,
+ SpentOutPoint: &htlcOutpoint,
SpenderTxHash: &sweepTxid,
}
ctx.notifier.SpendChan <- details
@@ -215,8 +231,8 @@ func TestHtlcSuccessSingleStage(t *testing.T) {
)
}
-// TestSecondStageResolution tests successful sweep of a second stage htlc
-// claim, going through the Nursery.
+// TestHtlcSuccessSecondStageResolution tests successful sweep of a second
+// stage htlc claim, going through the Nursery.
func TestHtlcSuccessSecondStageResolution(t *testing.T) {
commitOutpoint := wire.OutPoint{Index: 2}
htlcOutpoint := wire.OutPoint{Index: 3}
@@ -279,6 +295,7 @@ func TestHtlcSuccessSecondStageResolution(t *testing.T) {
ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
SpendingTx: sweepTx,
+ SpentOutPoint: &htlcOutpoint,
SpenderTxHash: &sweepHash,
}
@@ -302,6 +319,8 @@ func TestHtlcSuccessSecondStageResolution(t *testing.T) {
// TestHtlcSuccessSecondStageResolutionSweeper test that a resolver with
// non-nil SignDetails will offer the second-level transaction to the sweeper
// for re-signing.
+//
+//nolint:lll
func TestHtlcSuccessSecondStageResolutionSweeper(t *testing.T) {
commitOutpoint := wire.OutPoint{Index: 2}
htlcOutpoint := wire.OutPoint{Index: 3}
@@ -399,7 +418,20 @@ func TestHtlcSuccessSecondStageResolutionSweeper(t *testing.T) {
_ bool) error {
resolver := ctx.resolver.(*htlcSuccessResolver)
- inp := <-resolver.Sweeper.(*mockSweeper).sweptInputs
+
+ var (
+ inp input.Input
+ ok bool
+ )
+
+ select {
+ case inp, ok = <-resolver.Sweeper.(*mockSweeper).sweptInputs:
+ require.True(t, ok)
+
+ case <-time.After(1 * time.Second):
+ t.Fatal("expected input to be swept")
+ }
+
op := inp.OutPoint()
if op != commitOutpoint {
return fmt.Errorf("outpoint %v swept, "+
@@ -412,6 +444,7 @@ func TestHtlcSuccessSecondStageResolutionSweeper(t *testing.T) {
SpenderTxHash: &reSignedHash,
SpenderInputIndex: 1,
SpendingHeight: 10,
+ SpentOutPoint: &commitOutpoint,
}
return nil
},
@@ -434,17 +467,37 @@ func TestHtlcSuccessSecondStageResolutionSweeper(t *testing.T) {
SpenderTxHash: &reSignedHash,
SpenderInputIndex: 1,
SpendingHeight: 10,
+ SpentOutPoint: &commitOutpoint,
}
}
- ctx.notifier.EpochChan <- &chainntnfs.BlockEpoch{
- Height: 13,
- }
-
// We expect it to sweep the second-level
// transaction we notfied about above.
resolver := ctx.resolver.(*htlcSuccessResolver)
- inp := <-resolver.Sweeper.(*mockSweeper).sweptInputs
+
+ // Mock `waitForSpend` to return the commit
+ // spend.
+ ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
+ SpendingTx: reSignedSuccessTx,
+ SpenderTxHash: &reSignedHash,
+ SpenderInputIndex: 1,
+ SpendingHeight: 10,
+ SpentOutPoint: &commitOutpoint,
+ }
+
+ var (
+ inp input.Input
+ ok bool
+ )
+
+ select {
+ case inp, ok = <-resolver.Sweeper.(*mockSweeper).sweptInputs:
+ require.True(t, ok)
+
+ case <-time.After(1 * time.Second):
+ t.Fatal("expected input to be swept")
+ }
+
op := inp.OutPoint()
exp := wire.OutPoint{
Hash: reSignedHash,
@@ -461,6 +514,7 @@ func TestHtlcSuccessSecondStageResolutionSweeper(t *testing.T) {
SpendingTx: sweepTx,
SpenderTxHash: &sweepHash,
SpendingHeight: 14,
+ SpentOutPoint: &op,
}
return nil
@@ -508,11 +562,14 @@ func testHtlcSuccess(t *testing.T, resolution lnwallet.IncomingHtlcResolution,
// for the next portion of the test.
ctx := newHtlcResolverTestContext(t,
func(htlc channeldb.HTLC, cfg ResolverConfig) ContractResolver {
- return &htlcSuccessResolver{
+ r := &htlcSuccessResolver{
contractResolverKit: *newContractResolverKit(cfg),
htlc: htlc,
htlcResolution: resolution,
}
+ r.initLogger("htlcSuccessResolver")
+
+ return r
},
)
@@ -562,11 +619,11 @@ func runFromCheckpoint(t *testing.T, ctx *htlcResolverTestContext,
var resolved, incubating bool
if h, ok := resolver.(*htlcSuccessResolver); ok {
- resolved = h.resolved
+ resolved = h.resolved.Load()
incubating = h.outputIncubating
}
if h, ok := resolver.(*htlcTimeoutResolver); ok {
- resolved = h.resolved
+ resolved = h.resolved.Load()
incubating = h.outputIncubating
}
@@ -610,7 +667,12 @@ func runFromCheckpoint(t *testing.T, ctx *htlcResolverTestContext,
checkpointedState = append(checkpointedState, b.Bytes())
nextCheckpoint++
- checkpointChan <- struct{}{}
+ select {
+ case checkpointChan <- struct{}{}:
+ case <-time.After(1 * time.Second):
+ t.Fatal("checkpoint timeout")
+ }
+
return nil
}
@@ -621,6 +683,8 @@ func runFromCheckpoint(t *testing.T, ctx *htlcResolverTestContext,
// preCheckpoint logic if needed.
resumed := true
for i, cp := range expectedCheckpoints {
+ t.Logf("Running checkpoint %d", i)
+
if cp.preCheckpoint != nil {
if err := cp.preCheckpoint(ctx, resumed); err != nil {
t.Fatalf("failure at stage %d: %v", i, err)
@@ -629,15 +693,15 @@ func runFromCheckpoint(t *testing.T, ctx *htlcResolverTestContext,
resumed = false
// Wait for the resolver to have checkpointed its state.
- <-checkpointChan
+ select {
+ case <-checkpointChan:
+ case <-time.After(1 * time.Second):
+ t.Fatalf("resolver did not checkpoint at stage %d", i)
+ }
}
// Wait for the resolver to fully complete.
ctx.waitForResult()
- if nextCheckpoint < len(expectedCheckpoints) {
- t.Fatalf("not all checkpoints hit")
- }
-
return checkpointedState
}
diff --git a/contractcourt/htlc_timeout_resolver.go b/contractcourt/htlc_timeout_resolver.go
index e7ab421691..bbedb9581b 100644
--- a/contractcourt/htlc_timeout_resolver.go
+++ b/contractcourt/htlc_timeout_resolver.go
@@ -7,11 +7,13 @@ import (
"sync"
"github.com/btcsuite/btcd/btcutil"
+ "github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
+ "github.com/lightningnetwork/lnd/channeldb/models"
"github.com/lightningnetwork/lnd/fn"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lntypes"
@@ -37,9 +39,6 @@ type htlcTimeoutResolver struct {
// incubator (utxo nursery).
outputIncubating bool
- // resolved reflects if the contract has been fully resolved or not.
- resolved bool
-
// broadcastHeight is the height that the original contract was
// broadcast to the main-chain at. We'll use this value to bound any
// historical queries to the chain for spends/confirmations.
@@ -82,6 +81,7 @@ func newTimeoutResolver(res lnwallet.OutgoingHtlcResolution,
}
h.initReport()
+ h.initLogger(fmt.Sprintf("%T(%v)", h, h.outpoint()))
return h
}
@@ -93,23 +93,25 @@ func (h *htlcTimeoutResolver) isTaproot() bool {
)
}
-// ResolverKey returns an identifier which should be globally unique for this
-// particular resolver within the chain the original contract resides within.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcTimeoutResolver) ResolverKey() []byte {
+// outpoint returns the outpoint of the HTLC output we're attempting to sweep.
+func (h *htlcTimeoutResolver) outpoint() wire.OutPoint {
// The primary key for this resolver will be the outpoint of the HTLC
// on the commitment transaction itself. If this is our commitment,
// then the output can be found within the signed timeout tx,
// otherwise, it's just the ClaimOutpoint.
- var op wire.OutPoint
if h.htlcResolution.SignedTimeoutTx != nil {
- op = h.htlcResolution.SignedTimeoutTx.TxIn[0].PreviousOutPoint
- } else {
- op = h.htlcResolution.ClaimOutpoint
+ return h.htlcResolution.SignedTimeoutTx.TxIn[0].PreviousOutPoint
}
- key := newResolverID(op)
+ return h.htlcResolution.ClaimOutpoint
+}
+
+// ResolverKey returns an identifier which should be globally unique for this
+// particular resolver within the chain the original contract resides within.
+//
+// NOTE: Part of the ContractResolver interface.
+func (h *htlcTimeoutResolver) ResolverKey() []byte {
+ key := newResolverID(h.outpoint())
return key[:]
}
@@ -157,7 +159,7 @@ const (
// by the remote party. It'll extract the preimage, add it to the global cache,
// and finally send the appropriate clean up message.
func (h *htlcTimeoutResolver) claimCleanUp(
- commitSpend *chainntnfs.SpendDetail) (ContractResolver, error) {
+ commitSpend *chainntnfs.SpendDetail) error {
// Depending on if this is our commitment or not, then we'll be looking
// for a different witness pattern.
@@ -192,7 +194,7 @@ func (h *htlcTimeoutResolver) claimCleanUp(
// element, then we're actually on the losing side of a breach
// attempt...
case h.isTaproot() && len(spendingInput.Witness) == 1:
- return nil, fmt.Errorf("breach attempt failed")
+ return fmt.Errorf("breach attempt failed")
// Otherwise, they'll be spending directly from our commitment output.
// In which case the witness stack looks like:
@@ -209,8 +211,8 @@ func (h *htlcTimeoutResolver) claimCleanUp(
preimage, err := lntypes.MakePreimage(preimageBytes)
if err != nil {
- return nil, fmt.Errorf("unable to create pre-image from "+
- "witness: %v", err)
+ return fmt.Errorf("unable to create pre-image from witness: %w",
+ err)
}
log.Infof("%T(%v): extracting preimage=%v from on-chain "+
@@ -232,9 +234,9 @@ func (h *htlcTimeoutResolver) claimCleanUp(
HtlcIndex: h.htlc.HtlcIndex,
PreImage: &pre,
}); err != nil {
- return nil, err
+ return err
}
- h.resolved = true
+ h.resolved.Store(true)
// Checkpoint our resolver with a report which reflects the preimage
// claim by the remote party.
@@ -247,7 +249,7 @@ func (h *htlcTimeoutResolver) claimCleanUp(
SpendTxID: commitSpend.SpenderTxHash,
}
- return nil, h.Checkpoint(h, report)
+ return h.Checkpoint(h, report)
}
// chainDetailsToWatch returns the output and script which we use to watch for
@@ -418,70 +420,44 @@ func checkSizeAndIndex(witness wire.TxWitness, size, index int) bool {
// see a direct sweep via the timeout clause.
//
// NOTE: Part of the ContractResolver interface.
-func (h *htlcTimeoutResolver) Resolve(
- immediate bool) (ContractResolver, error) {
-
+func (h *htlcTimeoutResolver) Resolve() (ContractResolver, error) {
// If we're already resolved, then we can exit early.
- if h.resolved {
+ if h.IsResolved() {
+ h.log.Errorf("already resolved")
return nil, nil
}
- // Start by spending the HTLC output, either by broadcasting the
- // second-level timeout transaction, or directly if this is the remote
- // commitment.
- commitSpend, err := h.spendHtlcOutput(immediate)
- if err != nil {
- return nil, err
+ // If the HTLC has custom records, then for now we'll pause resolution.
+ //
+ // TODO(roasbeef): Implement resolving HTLCs with custom records
+ // (follow-up PR).
+ if len(h.htlc.CustomRecords) != 0 {
+ select { //nolint:gosimple
+ case <-h.quit:
+ return nil, errResolverShuttingDown
+ }
}
- // If the spend reveals the pre-image, then we'll enter the clean up
- // workflow to pass the pre-image back to the incoming link, add it to
- // the witness cache, and exit.
- if isPreimageSpend(
- h.isTaproot(), commitSpend,
- h.htlcResolution.SignedTimeoutTx != nil,
- ) {
-
- log.Infof("%T(%v): HTLC has been swept with pre-image by "+
- "remote party during timeout flow! Adding pre-image to "+
- "witness cache", h, h.htlc.RHash[:],
- h.htlcResolution.ClaimOutpoint)
-
- return h.claimCleanUp(commitSpend)
+ // If this is an output on the remote party's commitment transaction,
+ // use the direct-spend path to sweep the htlc.
+ if h.isRemoteCommitOutput() {
+ return nil, h.resolveRemoteCommitOutput()
}
- // At this point, the second-level transaction is sufficiently
- // confirmed, or a transaction directly spending the output is.
- // Therefore, we can now send back our clean up message, failing the
- // HTLC on the incoming link.
- //
- // NOTE: This can be called twice if the outgoing resolver restarts
- // before the second-stage timeout transaction is confirmed.
- log.Infof("%T(%v): resolving htlc with incoming fail msg, "+
- "fully confirmed", h, h.htlcResolution.ClaimOutpoint)
-
- failureMsg := &lnwire.FailPermanentChannelFailure{}
- err = h.DeliverResolutionMsg(ResolutionMsg{
- SourceChan: h.ShortChanID,
- HtlcIndex: h.htlc.HtlcIndex,
- Failure: failureMsg,
- })
- if err != nil {
- return nil, err
+ // If this is a zero-fee HTLC, we now handle the spend from our
+ // commitment transaction.
+ if h.isZeroFeeOutput() {
+ return nil, h.resolveTimeoutTx()
}
- // Depending on whether this was a local or remote commit, we must
- // handle the spending transaction accordingly.
- return h.handleCommitSpend(commitSpend)
+ // If this is an output on our own commitment using pre-anchor channel
+ // type, we will let the utxo nursery handle it.
+ return nil, h.resolveSecondLevelTxLegacy()
}
-// sweepSecondLevelTx sends a second level timeout transaction to the sweeper.
+// sweepTimeoutTx sends a second level timeout transaction to the sweeper.
// This transaction uses the SINLGE|ANYONECANPAY flag.
-func (h *htlcTimeoutResolver) sweepSecondLevelTx(immediate bool) error {
- log.Infof("%T(%x): offering second-layer timeout tx to sweeper: %v",
- h, h.htlc.RHash[:],
- spew.Sdump(h.htlcResolution.SignedTimeoutTx))
-
+func (h *htlcTimeoutResolver) sweepTimeoutTx() error {
var inp input.Input
if h.isTaproot() {
inp = lnutils.Ptr(input.MakeHtlcSecondLevelTimeoutTaprootInput(
@@ -512,33 +488,17 @@ func (h *htlcTimeoutResolver) sweepSecondLevelTx(immediate bool) error {
btcutil.Amount(inp.SignDesc().Output.Value), 2, 0,
)
- // For an outgoing HTLC, it must be swept before the RefundTimeout of
- // its incoming HTLC is reached.
- //
- // TODO(yy): we may end up mixing inputs with different time locks.
- // Suppose we have two outgoing HTLCs,
- // - HTLC1: nLocktime is 800000, CLTV delta is 80.
- // - HTLC2: nLocktime is 800001, CLTV delta is 79.
- // This means they would both have an incoming HTLC that expires at
- // 800080, hence they share the same deadline but different locktimes.
- // However, with current design, when we are at block 800000, HTLC1 is
- // offered to the sweeper. When block 800001 is reached, HTLC1's
- // sweeping process is already started, while HTLC2 is being offered to
- // the sweeper, so they won't be mixed. This can become an issue tho,
- // if we decide to sweep per X blocks. Or the contractcourt sees the
- // block first while the sweeper is only aware of the last block. To
- // properly fix it, we need `blockbeat` to make sure subsystems are in
- // sync.
- log.Infof("%T(%x): offering second-level HTLC timeout tx to sweeper "+
+ h.log.Infof("%T(%x): offering 2nd-level HTLC timeout tx to sweeper "+
"with deadline=%v, budget=%v", h, h.htlc.RHash[:],
h.incomingHTLCExpiryHeight, budget)
+ // For an outgoing HTLC, it must be swept before the RefundTimeout of
+ // its incoming HTLC is reached.
_, err := h.Sweeper.SweepInput(
inp,
sweep.Params{
Budget: budget,
DeadlineHeight: h.incomingHTLCExpiryHeight,
- Immediate: immediate,
},
)
if err != nil {
@@ -548,31 +508,31 @@ func (h *htlcTimeoutResolver) sweepSecondLevelTx(immediate bool) error {
return err
}
-// sendSecondLevelTxLegacy sends a second level timeout transaction to the utxo
-// nursery. This transaction uses the legacy SIGHASH_ALL flag.
-func (h *htlcTimeoutResolver) sendSecondLevelTxLegacy() error {
- log.Debugf("%T(%v): incubating htlc output", h,
- h.htlcResolution.ClaimOutpoint)
+// resolveSecondLevelTxLegacy sends a second level timeout transaction to the
+// utxo nursery. This transaction uses the legacy SIGHASH_ALL flag.
+func (h *htlcTimeoutResolver) resolveSecondLevelTxLegacy() error {
+ h.log.Debug("incubating htlc output")
+ // The utxo nursery will take care of broadcasting the second-level
+ // timeout tx and sweeping its output once it confirms.
err := h.IncubateOutputs(
h.ChanPoint, fn.Some(h.htlcResolution),
fn.None[lnwallet.IncomingHtlcResolution](),
h.broadcastHeight, h.incomingHTLCExpiryHeight,
)
+
if err != nil {
return err
}
- h.outputIncubating = true
-
- return h.Checkpoint(h)
+ return h.resolveTimeoutTx()
}
// sweepDirectHtlcOutput sends the direct spend of the HTLC output to the
// sweeper. This is used when the remote party goes on chain, and we're able to
// sweep an HTLC we offered after a timeout. Only the CLTV encumbered outputs
// are resolved via this path.
-func (h *htlcTimeoutResolver) sweepDirectHtlcOutput(immediate bool) error {
+func (h *htlcTimeoutResolver) sweepDirectHtlcOutput() error {
var htlcWitnessType input.StandardWitnessType
if h.isTaproot() {
htlcWitnessType = input.TaprootHtlcOfferedRemoteTimeout
@@ -612,7 +572,6 @@ func (h *htlcTimeoutResolver) sweepDirectHtlcOutput(immediate bool) error {
// This is an outgoing HTLC, so we want to make sure
// that we sweep it before the incoming HTLC expires.
DeadlineHeight: h.incomingHTLCExpiryHeight,
- Immediate: immediate,
},
)
if err != nil {
@@ -622,53 +581,6 @@ func (h *htlcTimeoutResolver) sweepDirectHtlcOutput(immediate bool) error {
return nil
}
-// spendHtlcOutput handles the initial spend of an HTLC output via the timeout
-// clause. If this is our local commitment, the second-level timeout TX will be
-// used to spend the output into the next stage. If this is the remote
-// commitment, the output will be swept directly without the timeout
-// transaction.
-func (h *htlcTimeoutResolver) spendHtlcOutput(
- immediate bool) (*chainntnfs.SpendDetail, error) {
-
- switch {
- // If we have non-nil SignDetails, this means that have a 2nd level
- // HTLC transaction that is signed using sighash SINGLE|ANYONECANPAY
- // (the case for anchor type channels). In this case we can re-sign it
- // and attach fees at will. We let the sweeper handle this job.
- case h.htlcResolution.SignDetails != nil && !h.outputIncubating:
- if err := h.sweepSecondLevelTx(immediate); err != nil {
- log.Errorf("Sending timeout tx to sweeper: %v", err)
-
- return nil, err
- }
-
- // If this is a remote commitment there's no second level timeout txn,
- // and we can just send this directly to the sweeper.
- case h.htlcResolution.SignedTimeoutTx == nil && !h.outputIncubating:
- if err := h.sweepDirectHtlcOutput(immediate); err != nil {
- log.Errorf("Sending direct spend to sweeper: %v", err)
-
- return nil, err
- }
-
- // If we have a SignedTimeoutTx but no SignDetails, this is a local
- // commitment for a non-anchor channel, so we'll send it to the utxo
- // nursery.
- case h.htlcResolution.SignDetails == nil && !h.outputIncubating:
- if err := h.sendSecondLevelTxLegacy(); err != nil {
- log.Errorf("Sending timeout tx to nursery: %v", err)
-
- return nil, err
- }
- }
-
- // Now that we've handed off the HTLC to the nursery or sweeper, we'll
- // watch for a spend of the output, and make our next move off of that.
- // Depending on if this is our commitment, or the remote party's
- // commitment, we'll be watching a different outpoint and script.
- return h.watchHtlcSpend()
-}
-
// watchHtlcSpend watches for a spend of the HTLC output. For neutrino backend,
// it will check blocks for the confirmed spend. For btcd and bitcoind, it will
// check both the mempool and the blocks.
@@ -697,9 +609,6 @@ func (h *htlcTimeoutResolver) watchHtlcSpend() (*chainntnfs.SpendDetail,
func (h *htlcTimeoutResolver) waitForConfirmedSpend(op *wire.OutPoint,
pkScript []byte) (*chainntnfs.SpendDetail, error) {
- log.Infof("%T(%v): waiting for spent of HTLC output %v to be "+
- "fully confirmed", h, h.htlcResolution.ClaimOutpoint, op)
-
// We'll block here until either we exit, or the HTLC output on the
// commitment transaction has been spent.
spend, err := waitForSpend(
@@ -709,239 +618,18 @@ func (h *htlcTimeoutResolver) waitForConfirmedSpend(op *wire.OutPoint,
return nil, err
}
- // Once confirmed, persist the state on disk.
- if err := h.checkPointSecondLevelTx(); err != nil {
- return nil, err
- }
-
return spend, err
}
-// checkPointSecondLevelTx persists the state of a second level HTLC tx to disk
-// if it's published by the sweeper.
-func (h *htlcTimeoutResolver) checkPointSecondLevelTx() error {
- // If this was the second level transaction published by the sweeper,
- // we can checkpoint the resolver now that it's confirmed.
- if h.htlcResolution.SignDetails != nil && !h.outputIncubating {
- h.outputIncubating = true
- if err := h.Checkpoint(h); err != nil {
- log.Errorf("unable to Checkpoint: %v", err)
- return err
- }
- }
-
- return nil
-}
-
-// handleCommitSpend handles the spend of the HTLC output on the commitment
-// transaction. If this was our local commitment, the spend will be he
-// confirmed second-level timeout transaction, and we'll sweep that into our
-// wallet. If the was a remote commitment, the resolver will resolve
-// immetiately.
-func (h *htlcTimeoutResolver) handleCommitSpend(
- commitSpend *chainntnfs.SpendDetail) (ContractResolver, error) {
-
- var (
- // claimOutpoint will be the outpoint of the second level
- // transaction, or on the remote commitment directly. It will
- // start out as set in the resolution, but we'll update it if
- // the second-level goes through the sweeper and changes its
- // txid.
- claimOutpoint = h.htlcResolution.ClaimOutpoint
-
- // spendTxID will be the ultimate spend of the claimOutpoint.
- // We set it to the commit spend for now, as this is the
- // ultimate spend in case this is a remote commitment. If we go
- // through the second-level transaction, we'll update this
- // accordingly.
- spendTxID = commitSpend.SpenderTxHash
-
- reports []*channeldb.ResolverReport
- )
-
- switch {
-
- // If we swept an HTLC directly off the remote party's commitment
- // transaction, then we can exit here as there's no second level sweep
- // to do.
- case h.htlcResolution.SignedTimeoutTx == nil:
- break
-
- // If the sweeper is handling the second level transaction, wait for
- // the CSV and possible CLTV lock to expire, before sweeping the output
- // on the second-level.
- case h.htlcResolution.SignDetails != nil:
- waitHeight := h.deriveWaitHeight(
- h.htlcResolution.CsvDelay, commitSpend,
- )
-
- h.reportLock.Lock()
- h.currentReport.Stage = 2
- h.currentReport.MaturityHeight = waitHeight
- h.reportLock.Unlock()
-
- if h.hasCLTV() {
- log.Infof("%T(%x): waiting for CSV and CLTV lock to "+
- "expire at height %v", h, h.htlc.RHash[:],
- waitHeight)
- } else {
- log.Infof("%T(%x): waiting for CSV lock to expire at "+
- "height %v", h, h.htlc.RHash[:], waitHeight)
- }
-
- // Deduct one block so this input is offered to the sweeper one
- // block earlier since the sweeper will wait for one block to
- // trigger the sweeping.
- //
- // TODO(yy): this is done so the outputs can be aggregated
- // properly. Suppose CSV locks of five 2nd-level outputs all
- // expire at height 840000, there is a race in block digestion
- // between contractcourt and sweeper:
- // - G1: block 840000 received in contractcourt, it now offers
- // the outputs to the sweeper.
- // - G2: block 840000 received in sweeper, it now starts to
- // sweep the received outputs - there's no guarantee all
- // fives have been received.
- // To solve this, we either offer the outputs earlier, or
- // implement `blockbeat`, and force contractcourt and sweeper
- // to consume each block sequentially.
- waitHeight--
-
- // TODO(yy): let sweeper handles the wait?
- err := waitForHeight(waitHeight, h.Notifier, h.quit)
- if err != nil {
- return nil, err
- }
-
- // We'll use this input index to determine the second-level
- // output index on the transaction, as the signatures requires
- // the indexes to be the same. We don't look for the
- // second-level output script directly, as there might be more
- // than one HTLC output to the same pkScript.
- op := &wire.OutPoint{
- Hash: *commitSpend.SpenderTxHash,
- Index: commitSpend.SpenderInputIndex,
- }
-
- var csvWitnessType input.StandardWitnessType
- if h.isTaproot() {
- //nolint:lll
- csvWitnessType = input.TaprootHtlcOfferedTimeoutSecondLevel
- } else {
- csvWitnessType = input.HtlcOfferedTimeoutSecondLevel
- }
-
- // Let the sweeper sweep the second-level output now that the
- // CSV/CLTV locks have expired.
- inp := h.makeSweepInput(
- op, csvWitnessType,
- input.LeaseHtlcOfferedTimeoutSecondLevel,
- &h.htlcResolution.SweepSignDesc,
- h.htlcResolution.CsvDelay,
- uint32(commitSpend.SpendingHeight), h.htlc.RHash,
- h.htlcResolution.ResolutionBlob,
- )
-
- // Calculate the budget for this sweep.
- budget := calculateBudget(
- btcutil.Amount(inp.SignDesc().Output.Value),
- h.Budget.NoDeadlineHTLCRatio,
- h.Budget.NoDeadlineHTLC,
- )
-
- log.Infof("%T(%x): offering second-level timeout tx output to "+
- "sweeper with no deadline and budget=%v at height=%v",
- h, h.htlc.RHash[:], budget, waitHeight)
-
- _, err = h.Sweeper.SweepInput(
- inp,
- sweep.Params{
- Budget: budget,
-
- // For second level success tx, there's no rush
- // to get it confirmed, so we use a nil
- // deadline.
- DeadlineHeight: fn.None[int32](),
- },
- )
- if err != nil {
- return nil, err
- }
-
- // Update the claim outpoint to point to the second-level
- // transaction created by the sweeper.
- claimOutpoint = *op
- fallthrough
-
- // Finally, if this was an output on our commitment transaction, we'll
- // wait for the second-level HTLC output to be spent, and for that
- // transaction itself to confirm.
- case h.htlcResolution.SignedTimeoutTx != nil:
- log.Infof("%T(%v): waiting for nursery/sweeper to spend CSV "+
- "delayed output", h, claimOutpoint)
-
- sweepTx, err := waitForSpend(
- &claimOutpoint,
- h.htlcResolution.SweepSignDesc.Output.PkScript,
- h.broadcastHeight, h.Notifier, h.quit,
- )
- if err != nil {
- return nil, err
- }
-
- // Update the spend txid to the hash of the sweep transaction.
- spendTxID = sweepTx.SpenderTxHash
-
- // Once our sweep of the timeout tx has confirmed, we add a
- // resolution for our timeoutTx tx first stage transaction.
- timeoutTx := commitSpend.SpendingTx
- index := commitSpend.SpenderInputIndex
- spendHash := commitSpend.SpenderTxHash
-
- reports = append(reports, &channeldb.ResolverReport{
- OutPoint: timeoutTx.TxIn[index].PreviousOutPoint,
- Amount: h.htlc.Amt.ToSatoshis(),
- ResolverType: channeldb.ResolverTypeOutgoingHtlc,
- ResolverOutcome: channeldb.ResolverOutcomeFirstStage,
- SpendTxID: spendHash,
- })
- }
-
- // With the clean up message sent, we'll now mark the contract
- // resolved, update the recovered balance, record the timeout and the
- // sweep txid on disk, and wait.
- h.resolved = true
- h.reportLock.Lock()
- h.currentReport.RecoveredBalance = h.currentReport.LimboBalance
- h.currentReport.LimboBalance = 0
- h.reportLock.Unlock()
-
- amt := btcutil.Amount(h.htlcResolution.SweepSignDesc.Output.Value)
- reports = append(reports, &channeldb.ResolverReport{
- OutPoint: claimOutpoint,
- Amount: amt,
- ResolverType: channeldb.ResolverTypeOutgoingHtlc,
- ResolverOutcome: channeldb.ResolverOutcomeTimeout,
- SpendTxID: spendTxID,
- })
-
- return nil, h.Checkpoint(h, reports...)
-}
-
// Stop signals the resolver to cancel any current resolution processes, and
// suspend.
//
// NOTE: Part of the ContractResolver interface.
func (h *htlcTimeoutResolver) Stop() {
- close(h.quit)
-}
+ h.log.Debugf("stopping...")
+ defer h.log.Debugf("stopped")
-// IsResolved returns true if the stored state in the resolve is fully
-// resolved. In this case the target output can be forgotten.
-//
-// NOTE: Part of the ContractResolver interface.
-func (h *htlcTimeoutResolver) IsResolved() bool {
- return h.resolved
+ close(h.quit)
}
// report returns a report on the resolution state of the contract.
@@ -1003,7 +691,7 @@ func (h *htlcTimeoutResolver) Encode(w io.Writer) error {
if err := binary.Write(w, endian, h.outputIncubating); err != nil {
return err
}
- if err := binary.Write(w, endian, h.resolved); err != nil {
+ if err := binary.Write(w, endian, h.IsResolved()); err != nil {
return err
}
if err := binary.Write(w, endian, h.broadcastHeight); err != nil {
@@ -1044,9 +732,13 @@ func newTimeoutResolverFromReader(r io.Reader, resCfg ResolverConfig) (
if err := binary.Read(r, endian, &h.outputIncubating); err != nil {
return nil, err
}
- if err := binary.Read(r, endian, &h.resolved); err != nil {
+
+ var resolved bool
+ if err := binary.Read(r, endian, &resolved); err != nil {
return nil, err
}
+ h.resolved.Store(resolved)
+
if err := binary.Read(r, endian, &h.broadcastHeight); err != nil {
return nil, err
}
@@ -1066,6 +758,7 @@ func newTimeoutResolverFromReader(r io.Reader, resCfg ResolverConfig) (
}
h.initReport()
+ h.initLogger(fmt.Sprintf("%T(%v)", h, h.outpoint()))
return h, nil
}
@@ -1173,12 +866,6 @@ func (h *htlcTimeoutResolver) consumeSpendEvents(resultChan chan *spendResult,
// Create a result chan to hold the results.
result := &spendResult{}
- // hasMempoolSpend is a flag that indicates whether we have found a
- // preimage spend from the mempool. This is used to determine whether
- // to checkpoint the resolver or not when later we found the
- // corresponding block spend.
- hasMempoolSpent := false
-
// Wait for a spend event to arrive.
for {
select {
@@ -1206,23 +893,6 @@ func (h *htlcTimeoutResolver) consumeSpendEvents(resultChan chan *spendResult,
// Once confirmed, persist the state on disk if
// we haven't seen the output's spending tx in
// mempool before.
- //
- // NOTE: we don't checkpoint the resolver if
- // it's spending tx has already been found in
- // mempool - the resolver will take care of the
- // checkpoint in its `claimCleanUp`. If we do
- // checkpoint here, however, we'd create a new
- // record in db for the same htlc resolver
- // which won't be cleaned up later, resulting
- // the channel to stay in unresolved state.
- //
- // TODO(yy): when fee bumper is implemented, we
- // need to further check whether this is a
- // preimage spend. Also need to refactor here
- // to save us some indentation.
- if !hasMempoolSpent {
- result.err = h.checkPointSecondLevelTx()
- }
}
// Send the result and exit the loop.
@@ -1256,7 +926,7 @@ func (h *htlcTimeoutResolver) consumeSpendEvents(resultChan chan *spendResult,
// continue the loop.
hasPreimage := isPreimageSpend(
h.isTaproot(), spendDetail,
- h.htlcResolution.SignedTimeoutTx != nil,
+ !h.isRemoteCommitOutput(),
)
if !hasPreimage {
log.Debugf("HTLC output %s spent doesn't "+
@@ -1269,10 +939,6 @@ func (h *htlcTimeoutResolver) consumeSpendEvents(resultChan chan *spendResult,
result.spend = spendDetail
resultChan <- result
- // Set the hasMempoolSpent flag to true so we won't
- // checkpoint the resolver again in db.
- hasMempoolSpent = true
-
continue
// If the resolver exits, we exit the goroutine.
@@ -1284,3 +950,403 @@ func (h *htlcTimeoutResolver) consumeSpendEvents(resultChan chan *spendResult,
}
}
}
+
+// isRemoteCommitOutput returns a bool to indicate whether the htlc output is
+// on the remote commitment.
+func (h *htlcTimeoutResolver) isRemoteCommitOutput() bool {
+ // If we don't have a timeout transaction, then this means that this is
+ // an output on the remote party's commitment transaction.
+ return h.htlcResolution.SignedTimeoutTx == nil
+}
+
+// isZeroFeeOutput returns a boolean indicating whether the htlc output is from
+// a anchor-enabled channel, which uses the sighash SINGLE|ANYONECANPAY.
+func (h *htlcTimeoutResolver) isZeroFeeOutput() bool {
+ // If we have non-nil SignDetails, this means it has a 2nd level HTLC
+ // transaction that is signed using sighash SINGLE|ANYONECANPAY (the
+ // case for anchor type channels). In this case we can re-sign it and
+ // attach fees at will.
+ return h.htlcResolution.SignedTimeoutTx != nil &&
+ h.htlcResolution.SignDetails != nil
+}
+
+// waitHtlcSpendAndCheckPreimage waits for the htlc output to be spent and
+// checks whether the spending reveals the preimage. If the preimage is found,
+// it will be added to the preimage beacon to settle the incoming link, and a
+// nil spend details will be returned. Otherwise, the spend details will be
+// returned, indicating this is a non-preimage spend.
+func (h *htlcTimeoutResolver) waitHtlcSpendAndCheckPreimage() (
+ *chainntnfs.SpendDetail, error) {
+
+ // Wait for the htlc output to be spent, which can happen in one of the
+ // paths,
+ // 1. The remote party spends the htlc output using the preimage.
+ // 2. The local party spends the htlc timeout tx from the local
+ // commitment.
+ // 3. The local party spends the htlc output directlt from the remote
+ // commitment.
+ spend, err := h.watchHtlcSpend()
+ if err != nil {
+ return nil, err
+ }
+
+ // If the spend reveals the pre-image, then we'll enter the clean up
+ // workflow to pass the preimage back to the incoming link, add it to
+ // the witness cache, and exit.
+ if isPreimageSpend(h.isTaproot(), spend, !h.isRemoteCommitOutput()) {
+ return nil, h.claimCleanUp(spend)
+ }
+
+ return spend, nil
+}
+
+// sweepTimeoutTxOutput attempts to sweep the output of the second level
+// timeout tx.
+func (h *htlcTimeoutResolver) sweepTimeoutTxOutput() error {
+ h.log.Debugf("sweeping output %v from 2nd-level HTLC timeout tx",
+ h.htlcResolution.ClaimOutpoint)
+
+ // This should be non-blocking as we will only attempt to sweep the
+ // output when the second level tx has already been confirmed. In other
+ // words, waitHtlcSpendAndCheckPreimage will return immediately.
+ commitSpend, err := h.waitHtlcSpendAndCheckPreimage()
+ if err != nil {
+ return err
+ }
+
+ // Exit early if the spend is nil, as this means it's a remote spend
+ // using the preimage path, which is handled in claimCleanUp.
+ if commitSpend == nil {
+ h.log.Infof("preimage spend detected, skipping 2nd-level " +
+ "HTLC output sweep")
+
+ return nil
+ }
+
+ waitHeight := h.deriveWaitHeight(h.htlcResolution.CsvDelay, commitSpend)
+
+ // Now that the sweeper has broadcasted the second-level transaction,
+ // it has confirmed, and we have checkpointed our state, we'll sweep
+ // the second level output. We report the resolver has moved the next
+ // stage.
+ h.reportLock.Lock()
+ h.currentReport.Stage = 2
+ h.currentReport.MaturityHeight = waitHeight
+ h.reportLock.Unlock()
+
+ if h.hasCLTV() {
+ h.log.Infof("waiting for CSV and CLTV lock to expire at "+
+ "height %v", waitHeight)
+ } else {
+ h.log.Infof("waiting for CSV lock to expire at height %v",
+ waitHeight)
+ }
+
+ // We'll use this input index to determine the second-level output
+ // index on the transaction, as the signatures requires the indexes to
+ // be the same. We don't look for the second-level output script
+ // directly, as there might be more than one HTLC output to the same
+ // pkScript.
+ op := &wire.OutPoint{
+ Hash: *commitSpend.SpenderTxHash,
+ Index: commitSpend.SpenderInputIndex,
+ }
+
+ var witType input.StandardWitnessType
+ if h.isTaproot() {
+ witType = input.TaprootHtlcOfferedTimeoutSecondLevel
+ } else {
+ witType = input.HtlcOfferedTimeoutSecondLevel
+ }
+
+ // Let the sweeper sweep the second-level output now that the CSV/CLTV
+ // locks have expired.
+ inp := h.makeSweepInput(
+ op, witType,
+ input.LeaseHtlcOfferedTimeoutSecondLevel,
+ &h.htlcResolution.SweepSignDesc,
+ h.htlcResolution.CsvDelay, uint32(commitSpend.SpendingHeight),
+ h.htlc.RHash, h.htlcResolution.ResolutionBlob,
+ )
+
+ // Calculate the budget for this sweep.
+ budget := calculateBudget(
+ btcutil.Amount(inp.SignDesc().Output.Value),
+ h.Budget.NoDeadlineHTLCRatio,
+ h.Budget.NoDeadlineHTLC,
+ )
+
+ h.log.Infof("offering output from 2nd-level timeout tx to sweeper "+
+ "with no deadline and budget=%v", budget)
+
+ // TODO(yy): use the result chan returned from SweepInput.
+ _, err = h.Sweeper.SweepInput(
+ inp,
+ sweep.Params{
+ Budget: budget,
+
+ // For second level success tx, there's no rush
+ // to get it confirmed, so we use a nil
+ // deadline.
+ DeadlineHeight: fn.None[int32](),
+ },
+ )
+
+ return err
+}
+
+// checkpointStageOne creates a checkpoint for the first stage of the htlc
+// timeout transaction. This is used to ensure that the resolver can resume
+// watching for the second stage spend in case of a restart.
+func (h *htlcTimeoutResolver) checkpointStageOne(
+ spendTxid chainhash.Hash) error {
+
+ h.log.Debugf("checkpoint stage one spend of HTLC output %v, spent "+
+ "in tx %v", h.outpoint(), spendTxid)
+
+ // Now that the second-level transaction has confirmed, we checkpoint
+ // the state so we'll go to the next stage in case of restarts.
+ h.outputIncubating = true
+
+ // Create stage-one report.
+ report := &channeldb.ResolverReport{
+ OutPoint: h.outpoint(),
+ Amount: h.htlc.Amt.ToSatoshis(),
+ ResolverType: channeldb.ResolverTypeOutgoingHtlc,
+ ResolverOutcome: channeldb.ResolverOutcomeFirstStage,
+ SpendTxID: &spendTxid,
+ }
+
+ // At this point, the second-level transaction is sufficiently
+ // confirmed. We can now send back our clean up message, failing the
+ // HTLC on the incoming link.
+ failureMsg := &lnwire.FailPermanentChannelFailure{}
+ err := h.DeliverResolutionMsg(ResolutionMsg{
+ SourceChan: h.ShortChanID,
+ HtlcIndex: h.htlc.HtlcIndex,
+ Failure: failureMsg,
+ })
+ if err != nil {
+ return err
+ }
+
+ return h.Checkpoint(h, report)
+}
+
+// checkpointClaim checkpoints the timeout resolver with the reports it needs.
+func (h *htlcTimeoutResolver) checkpointClaim(
+ spendDetail *chainntnfs.SpendDetail) error {
+
+ h.log.Infof("resolving htlc with incoming fail msg, output=%v "+
+ "confirmed in tx=%v", spendDetail.SpentOutPoint,
+ spendDetail.SpenderTxHash)
+
+ // For the direct-timeout spend, we will jump to this checkpoint
+ // without calling `checkpointStageOne`. Thus we need to send the clean
+ // up msg to fail the incoming HTLC.
+ if h.isRemoteCommitOutput() {
+ failureMsg := &lnwire.FailPermanentChannelFailure{}
+ err := h.DeliverResolutionMsg(ResolutionMsg{
+ SourceChan: h.ShortChanID,
+ HtlcIndex: h.htlc.HtlcIndex,
+ Failure: failureMsg,
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ // Send notification.
+ h.ChainArbitratorConfig.HtlcNotifier.NotifyFinalHtlcEvent(
+ models.CircuitKey{
+ ChanID: h.ShortChanID,
+ HtlcID: h.htlc.HtlcIndex,
+ },
+ channeldb.FinalHtlcInfo{
+ Settled: true,
+ Offchain: false,
+ },
+ )
+
+ // Create a resolver report for claiming of the htlc itself.
+ amt := btcutil.Amount(h.htlcResolution.SweepSignDesc.Output.Value)
+ report := &channeldb.ResolverReport{
+ OutPoint: *spendDetail.SpentOutPoint,
+ Amount: amt,
+ ResolverType: channeldb.ResolverTypeOutgoingHtlc,
+ ResolverOutcome: channeldb.ResolverOutcomeTimeout,
+ SpendTxID: spendDetail.SpenderTxHash,
+ }
+
+ // Finally, we checkpoint the resolver with our report(s).
+ h.resolved.Store(true)
+
+ return h.Checkpoint(h, report)
+}
+
+// resolveRemoteCommitOutput handles sweeping an HTLC output on the remote
+// commitment with via the timeout path. In this case we can sweep the output
+// directly, and don't have to broadcast a second-level transaction.
+func (h *htlcTimeoutResolver) resolveRemoteCommitOutput() error {
+ h.log.Debug("waiting for direct-timeout spend of the htlc to confirm")
+
+ // Wait for the direct-timeout HTLC sweep tx to confirm.
+ spend, err := h.watchHtlcSpend()
+ if err != nil {
+ return err
+ }
+
+ // If the spend reveals the preimage, then we'll enter the clean up
+ // workflow to pass the preimage back to the incoming link, add it to
+ // the witness cache, and exit.
+ if isPreimageSpend(h.isTaproot(), spend, !h.isRemoteCommitOutput()) {
+ return h.claimCleanUp(spend)
+ }
+
+ // TODO(yy): should also update the `RecoveredBalance` and
+ // `LimboBalance` like other paths?
+
+ // Checkpoint the resolver, and write the outcome to disk.
+ return h.checkpointClaim(spend)
+}
+
+// resolveTimeoutTx waits for the sweeping tx of the second-level
+// timeout tx to confirm and offers the output from the timeout tx to the
+// sweeper.
+func (h *htlcTimeoutResolver) resolveTimeoutTx() error {
+ h.log.Debug("waiting for first-stage 2nd-level HTLC timeout tx to " +
+ "confirm")
+
+ // Wait for the second level transaction to confirm.
+ spend, err := h.watchHtlcSpend()
+ if err != nil {
+ return err
+ }
+
+ // If the spend reveals the preimage, then we'll enter the clean up
+ // workflow to pass the preimage back to the incoming link, add it to
+ // the witness cache, and exit.
+ if isPreimageSpend(h.isTaproot(), spend, !h.isRemoteCommitOutput()) {
+ return h.claimCleanUp(spend)
+ }
+
+ op := h.htlcResolution.ClaimOutpoint
+ spenderTxid := *spend.SpenderTxHash
+
+ // If the timeout tx is a re-signed tx, we will need to find the actual
+ // spent outpoint from the spending tx.
+ if h.isZeroFeeOutput() {
+ op = wire.OutPoint{
+ Hash: spenderTxid,
+ Index: spend.SpenderInputIndex,
+ }
+ }
+
+ // If the 2nd-stage sweeping has already been started, we can
+ // fast-forward to start the resolving process for the stage two
+ // output.
+ if h.outputIncubating {
+ return h.resolveTimeoutTxOutput(op)
+ }
+
+ h.log.Infof("2nd-level HTLC timeout tx=%v confirmed", spenderTxid)
+
+ // Start the process to sweep the output from the timeout tx.
+ if h.isZeroFeeOutput() {
+ err = h.sweepTimeoutTxOutput()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Create a checkpoint since the timeout tx is confirmed and the sweep
+ // request has been made.
+ if err := h.checkpointStageOne(spenderTxid); err != nil {
+ return err
+ }
+
+ // Start the resolving process for the stage two output.
+ return h.resolveTimeoutTxOutput(op)
+}
+
+// resolveTimeoutTxOutput waits for the spend of the output from the 2nd-level
+// timeout tx.
+func (h *htlcTimeoutResolver) resolveTimeoutTxOutput(op wire.OutPoint) error {
+ h.log.Debugf("waiting for second-stage 2nd-level timeout tx output %v "+
+ "to be spent after csv_delay=%v", op, h.htlcResolution.CsvDelay)
+
+ spend, err := waitForSpend(
+ &op, h.htlcResolution.SweepSignDesc.Output.PkScript,
+ h.broadcastHeight, h.Notifier, h.quit,
+ )
+ if err != nil {
+ return err
+ }
+
+ h.reportLock.Lock()
+ h.currentReport.RecoveredBalance = h.currentReport.LimboBalance
+ h.currentReport.LimboBalance = 0
+ h.reportLock.Unlock()
+
+ return h.checkpointClaim(spend)
+}
+
+// Launch creates an input based on the details of the outgoing htlc resolution
+// and offers it to the sweeper.
+func (h *htlcTimeoutResolver) Launch() error {
+ if h.launched.Load() {
+ h.log.Tracef("already launched")
+ return nil
+ }
+
+ h.log.Debugf("launching resolver...")
+ h.launched.Store(true)
+
+ // If the HTLC has custom records, then for now we'll pause resolution.
+ //
+ // TODO(roasbeef): Implement resolving HTLCs with custom records
+ // (follow-up PR).
+ if len(h.htlc.CustomRecords) != 0 {
+ select { //nolint:gosimple
+ case <-h.quit:
+ return nil
+ }
+ }
+
+ switch {
+ // If we're already resolved, then we can exit early.
+ case h.IsResolved():
+ h.log.Errorf("already resolved")
+ return nil
+
+ // If this is an output on the remote party's commitment transaction,
+ // use the direct timeout spend path.
+ //
+ // NOTE: When the outputIncubating is false, it means that the output
+ // has been offered to the utxo nursery as starting in 0.18.4, we
+ // stopped marking this flag for direct timeout spends (#9062). In that
+ // case, we will do nothing and let the utxo nursery handle it.
+ case h.isRemoteCommitOutput() && !h.outputIncubating:
+ return h.sweepDirectHtlcOutput()
+
+ // If this is an anchor type channel, we now sweep either the
+ // second-level timeout tx or the output from the second-level timeout
+ // tx.
+ case h.isZeroFeeOutput():
+ // If the second-level timeout tx has already been swept, we
+ // can go ahead and sweep its output.
+ if h.outputIncubating {
+ return h.sweepTimeoutTxOutput()
+ }
+
+ // Otherwise, sweep the second level tx.
+ return h.sweepTimeoutTx()
+
+ // If this is an output on our own commitment using pre-anchor channel
+ // type, we will let the utxo nursery handle it via Resolve.
+ //
+ // TODO(yy): handle the legacy output by offering it to the sweeper.
+ default:
+ return nil
+ }
+}
diff --git a/contractcourt/htlc_timeout_resolver_test.go b/contractcourt/htlc_timeout_resolver_test.go
index 47be71d3ec..3f2f87228e 100644
--- a/contractcourt/htlc_timeout_resolver_test.go
+++ b/contractcourt/htlc_timeout_resolver_test.go
@@ -40,7 +40,7 @@ type mockWitnessBeacon struct {
func newMockWitnessBeacon() *mockWitnessBeacon {
return &mockWitnessBeacon{
preImageUpdates: make(chan lntypes.Preimage, 1),
- newPreimages: make(chan []lntypes.Preimage),
+ newPreimages: make(chan []lntypes.Preimage, 1),
lookupPreimage: make(map[lntypes.Hash]lntypes.Preimage),
}
}
@@ -280,7 +280,7 @@ func testHtlcTimeoutResolver(t *testing.T, testCase htlcTimeoutTestCase) {
notifier := &mock.ChainNotifier{
EpochChan: make(chan *chainntnfs.BlockEpoch),
- SpendChan: make(chan *chainntnfs.SpendDetail),
+ SpendChan: make(chan *chainntnfs.SpendDetail, 1),
ConfChan: make(chan *chainntnfs.TxConfirmation),
}
@@ -321,6 +321,7 @@ func testHtlcTimeoutResolver(t *testing.T, testCase htlcTimeoutTestCase) {
return nil
},
+ HtlcNotifier: &mockHTLCNotifier{},
},
PutResolverReport: func(_ kvdb.RwTx,
_ *channeldb.ResolverReport) error {
@@ -356,6 +357,7 @@ func testHtlcTimeoutResolver(t *testing.T, testCase htlcTimeoutTestCase) {
Amt: testHtlcAmt,
},
}
+ resolver.initLogger("timeoutResolver")
var reports []*channeldb.ResolverReport
@@ -390,7 +392,12 @@ func testHtlcTimeoutResolver(t *testing.T, testCase htlcTimeoutTestCase) {
go func() {
defer wg.Done()
- _, err := resolver.Resolve(false)
+ err := resolver.Launch()
+ if err != nil {
+ resolveErr <- err
+ }
+
+ _, err = resolver.Resolve()
if err != nil {
resolveErr <- err
}
@@ -406,8 +413,7 @@ func testHtlcTimeoutResolver(t *testing.T, testCase htlcTimeoutTestCase) {
sweepChan = mockSweeper.sweptInputs
}
- // The output should be offered to either the sweeper or
- // the nursery.
+ // The output should be offered to either the sweeper or the nursery.
select {
case <-incubateChan:
case <-sweepChan:
@@ -431,6 +437,7 @@ func testHtlcTimeoutResolver(t *testing.T, testCase htlcTimeoutTestCase) {
case notifier.SpendChan <- &chainntnfs.SpendDetail{
SpendingTx: spendingTx,
SpenderTxHash: &spendTxHash,
+ SpentOutPoint: &testChanPoint2,
}:
case <-time.After(time.Second * 5):
t.Fatalf("failed to request spend ntfn")
@@ -487,6 +494,7 @@ func testHtlcTimeoutResolver(t *testing.T, testCase htlcTimeoutTestCase) {
case notifier.SpendChan <- &chainntnfs.SpendDetail{
SpendingTx: spendingTx,
SpenderTxHash: &spendTxHash,
+ SpentOutPoint: &testChanPoint2,
}:
case <-time.After(time.Second * 5):
t.Fatalf("failed to request spend ntfn")
@@ -524,7 +532,7 @@ func testHtlcTimeoutResolver(t *testing.T, testCase htlcTimeoutTestCase) {
wg.Wait()
// Finally, the resolver should be marked as resolved.
- if !resolver.resolved {
+ if !resolver.resolved.Load() {
t.Fatalf("resolver should be marked as resolved")
}
}
@@ -549,6 +557,8 @@ func TestHtlcTimeoutResolver(t *testing.T) {
// TestHtlcTimeoutSingleStage tests a remote commitment confirming, and the
// local node sweeping the HTLC output directly after timeout.
+//
+//nolint:lll
func TestHtlcTimeoutSingleStage(t *testing.T) {
commitOutpoint := wire.OutPoint{Index: 3}
@@ -573,6 +583,12 @@ func TestHtlcTimeoutSingleStage(t *testing.T) {
SpendTxID: &sweepTxid,
}
+ sweepSpend := &chainntnfs.SpendDetail{
+ SpendingTx: sweepTx,
+ SpentOutPoint: &commitOutpoint,
+ SpenderTxHash: &sweepTxid,
+ }
+
checkpoints := []checkpoint{
{
// We send a confirmation the sweep tx from published
@@ -582,9 +598,10 @@ func TestHtlcTimeoutSingleStage(t *testing.T) {
// The nursery will create and publish a sweep
// tx.
- ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
- SpendingTx: sweepTx,
- SpenderTxHash: &sweepTxid,
+ select {
+ case ctx.notifier.SpendChan <- sweepSpend:
+ case <-time.After(time.Second * 5):
+ t.Fatalf("failed to send spend ntfn")
}
// The resolver should deliver a failure
@@ -620,7 +637,9 @@ func TestHtlcTimeoutSingleStage(t *testing.T) {
// TestHtlcTimeoutSecondStage tests a local commitment being confirmed, and the
// local node claiming the HTLC output using the second-level timeout tx.
-func TestHtlcTimeoutSecondStage(t *testing.T) {
+//
+//nolint:lll
+func TestHtlcTimeoutSecondStagex(t *testing.T) {
commitOutpoint := wire.OutPoint{Index: 2}
htlcOutpoint := wire.OutPoint{Index: 3}
@@ -678,23 +697,57 @@ func TestHtlcTimeoutSecondStage(t *testing.T) {
SpendTxID: &sweepHash,
}
+ timeoutSpend := &chainntnfs.SpendDetail{
+ SpendingTx: timeoutTx,
+ SpentOutPoint: &commitOutpoint,
+ SpenderTxHash: &timeoutTxid,
+ }
+
+ sweepSpend := &chainntnfs.SpendDetail{
+ SpendingTx: sweepTx,
+ SpentOutPoint: &htlcOutpoint,
+ SpenderTxHash: &sweepHash,
+ }
+
checkpoints := []checkpoint{
{
+ preCheckpoint: func(ctx *htlcResolverTestContext,
+ _ bool) error {
+
+ // Deliver spend of timeout tx.
+ ctx.notifier.SpendChan <- timeoutSpend
+
+ return nil
+ },
+
// Output should be handed off to the nursery.
incubating: true,
+ reports: []*channeldb.ResolverReport{
+ firstStage,
+ },
},
{
// We send a confirmation for our sweep tx to indicate
// that our sweep succeeded.
preCheckpoint: func(ctx *htlcResolverTestContext,
- _ bool) error {
+ resumed bool) error {
- // The nursery will publish the timeout tx.
- ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
- SpendingTx: timeoutTx,
- SpenderTxHash: &timeoutTxid,
+ // When it's reloaded from disk, we need to
+ // re-send the notification to mock the first
+ // `watchHtlcSpend`.
+ if resumed {
+ // Deliver spend of timeout tx.
+ ctx.notifier.SpendChan <- timeoutSpend
+
+ // Deliver spend of timeout tx output.
+ ctx.notifier.SpendChan <- sweepSpend
+
+ return nil
}
+ // Deliver spend of timeout tx output.
+ ctx.notifier.SpendChan <- sweepSpend
+
// The resolver should deliver a failure
// resolution message (indicating we
// successfully timed out the HTLC).
@@ -707,12 +760,6 @@ func TestHtlcTimeoutSecondStage(t *testing.T) {
t.Fatalf("resolution not sent")
}
- // Deliver spend of timeout tx.
- ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
- SpendingTx: sweepTx,
- SpenderTxHash: &sweepHash,
- }
-
return nil
},
@@ -722,7 +769,7 @@ func TestHtlcTimeoutSecondStage(t *testing.T) {
incubating: true,
resolved: true,
reports: []*channeldb.ResolverReport{
- firstStage, secondState,
+ secondState,
},
},
}
@@ -796,10 +843,6 @@ func TestHtlcTimeoutSingleStageRemoteSpend(t *testing.T) {
}
checkpoints := []checkpoint{
- {
- // Output should be handed off to the nursery.
- incubating: true,
- },
{
// We send a spend notification for a remote spend with
// the preimage.
@@ -812,6 +855,7 @@ func TestHtlcTimeoutSingleStageRemoteSpend(t *testing.T) {
// the preimage.
ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
SpendingTx: spendTx,
+ SpentOutPoint: &commitOutpoint,
SpenderTxHash: &spendTxHash,
}
@@ -847,7 +891,7 @@ func TestHtlcTimeoutSingleStageRemoteSpend(t *testing.T) {
// After the success tx has confirmed, we expect the
// checkpoint to be resolved, and with the above
// report.
- incubating: true,
+ incubating: false,
resolved: true,
reports: []*channeldb.ResolverReport{
claim,
@@ -914,6 +958,7 @@ func TestHtlcTimeoutSecondStageRemoteSpend(t *testing.T) {
ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
SpendingTx: remoteSuccessTx,
+ SpentOutPoint: &commitOutpoint,
SpenderTxHash: &successTxid,
}
@@ -967,20 +1012,15 @@ func TestHtlcTimeoutSecondStageRemoteSpend(t *testing.T) {
// TestHtlcTimeoutSecondStageSweeper tests that for anchor channels, when a
// local commitment confirms, the timeout tx is handed to the sweeper to claim
// the HTLC output.
+//
+//nolint:lll
func TestHtlcTimeoutSecondStageSweeper(t *testing.T) {
- commitOutpoint := wire.OutPoint{Index: 2}
htlcOutpoint := wire.OutPoint{Index: 3}
- sweepTx := &wire.MsgTx{
- TxIn: []*wire.TxIn{{}},
- TxOut: []*wire.TxOut{{}},
- }
- sweepHash := sweepTx.TxHash()
-
timeoutTx := &wire.MsgTx{
TxIn: []*wire.TxIn{
{
- PreviousOutPoint: commitOutpoint,
+ PreviousOutPoint: htlcOutpoint,
},
},
TxOut: []*wire.TxOut{
@@ -1027,11 +1067,16 @@ func TestHtlcTimeoutSecondStageSweeper(t *testing.T) {
},
}
reSignedHash := reSignedTimeoutTx.TxHash()
- reSignedOutPoint := wire.OutPoint{
+
+ timeoutTxOutpoint := wire.OutPoint{
Hash: reSignedHash,
Index: 1,
}
+ // Make a copy so `isPreimageSpend` can easily pass.
+ sweepTx := reSignedTimeoutTx.Copy()
+ sweepHash := sweepTx.TxHash()
+
// twoStageResolution is a resolution for a htlc on the local
// party's commitment, where the timeout tx can be re-signed.
twoStageResolution := lnwallet.OutgoingHtlcResolution{
@@ -1045,7 +1090,7 @@ func TestHtlcTimeoutSecondStageSweeper(t *testing.T) {
}
firstStage := &channeldb.ResolverReport{
- OutPoint: commitOutpoint,
+ OutPoint: htlcOutpoint,
Amount: testHtlcAmt.ToSatoshis(),
ResolverType: channeldb.ResolverTypeOutgoingHtlc,
ResolverOutcome: channeldb.ResolverOutcomeFirstStage,
@@ -1053,12 +1098,45 @@ func TestHtlcTimeoutSecondStageSweeper(t *testing.T) {
}
secondState := &channeldb.ResolverReport{
- OutPoint: reSignedOutPoint,
+ OutPoint: timeoutTxOutpoint,
Amount: btcutil.Amount(testSignDesc.Output.Value),
ResolverType: channeldb.ResolverTypeOutgoingHtlc,
ResolverOutcome: channeldb.ResolverOutcomeTimeout,
SpendTxID: &sweepHash,
}
+ // mockTimeoutTxSpend is a helper closure to mock `waitForSpend` to
+ // return the commit spend in `sweepTimeoutTxOutput`.
+ mockTimeoutTxSpend := func(ctx *htlcResolverTestContext) {
+ select {
+ case ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
+ SpendingTx: reSignedTimeoutTx,
+ SpenderInputIndex: 1,
+ SpenderTxHash: &reSignedHash,
+ SpendingHeight: 10,
+ SpentOutPoint: &htlcOutpoint,
+ }:
+
+ case <-time.After(time.Second * 1):
+ t.Fatalf("spend not sent")
+ }
+ }
+
+ // mockSweepTxSpend is a helper closure to mock `waitForSpend` to
+ // return the commit spend in `sweepTimeoutTxOutput`.
+ mockSweepTxSpend := func(ctx *htlcResolverTestContext) {
+ select {
+ case ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
+ SpendingTx: sweepTx,
+ SpenderInputIndex: 1,
+ SpenderTxHash: &sweepHash,
+ SpendingHeight: 10,
+ SpentOutPoint: &timeoutTxOutpoint,
+ }:
+
+ case <-time.After(time.Second * 1):
+ t.Fatalf("spend not sent")
+ }
+ }
checkpoints := []checkpoint{
{
@@ -1067,28 +1145,40 @@ func TestHtlcTimeoutSecondStageSweeper(t *testing.T) {
_ bool) error {
resolver := ctx.resolver.(*htlcTimeoutResolver)
- inp := <-resolver.Sweeper.(*mockSweeper).sweptInputs
+
+ var (
+ inp input.Input
+ ok bool
+ )
+
+ select {
+ case inp, ok = <-resolver.Sweeper.(*mockSweeper).sweptInputs:
+ require.True(t, ok)
+
+ case <-time.After(1 * time.Second):
+ t.Fatal("expected input to be swept")
+ }
+
op := inp.OutPoint()
- if op != commitOutpoint {
+ if op != htlcOutpoint {
return fmt.Errorf("outpoint %v swept, "+
- "expected %v", op,
- commitOutpoint)
+ "expected %v", op, htlcOutpoint)
}
- // Emulat the sweeper spending using the
- // re-signed timeout tx.
- ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
- SpendingTx: reSignedTimeoutTx,
- SpenderInputIndex: 1,
- SpenderTxHash: &reSignedHash,
- SpendingHeight: 10,
- }
+ // Mock `waitForSpend` twice, called in,
+ // - `resolveReSignedTimeoutTx`
+ // - `sweepTimeoutTxOutput`.
+ mockTimeoutTxSpend(ctx)
+ mockTimeoutTxSpend(ctx)
return nil
},
// incubating=true is used to signal that the
// second-level transaction was confirmed.
incubating: true,
+ reports: []*channeldb.ResolverReport{
+ firstStage,
+ },
},
{
// We send a confirmation for our sweep tx to indicate
@@ -1096,18 +1186,18 @@ func TestHtlcTimeoutSecondStageSweeper(t *testing.T) {
preCheckpoint: func(ctx *htlcResolverTestContext,
resumed bool) error {
- // If we are resuming from a checkpoint, we
- // expect the resolver to re-subscribe to a
- // spend, hence we must resend it.
+ // Mock `waitForSpend` to return the commit
+ // spend.
if resumed {
- ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
- SpendingTx: reSignedTimeoutTx,
- SpenderInputIndex: 1,
- SpenderTxHash: &reSignedHash,
- SpendingHeight: 10,
- }
+ mockTimeoutTxSpend(ctx)
+ mockTimeoutTxSpend(ctx)
+ mockSweepTxSpend(ctx)
+
+ return nil
}
+ mockSweepTxSpend(ctx)
+
// The resolver should deliver a failure
// resolution message (indicating we
// successfully timed out the HTLC).
@@ -1120,15 +1210,23 @@ func TestHtlcTimeoutSecondStageSweeper(t *testing.T) {
t.Fatalf("resolution not sent")
}
- // Mimic CSV lock expiring.
- ctx.notifier.EpochChan <- &chainntnfs.BlockEpoch{
- Height: 13,
- }
-
// The timeout tx output should now be given to
// the sweeper.
resolver := ctx.resolver.(*htlcTimeoutResolver)
- inp := <-resolver.Sweeper.(*mockSweeper).sweptInputs
+
+ var (
+ inp input.Input
+ ok bool
+ )
+
+ select {
+ case inp, ok = <-resolver.Sweeper.(*mockSweeper).sweptInputs:
+ require.True(t, ok)
+
+ case <-time.After(1 * time.Second):
+ t.Fatal("expected input to be swept")
+ }
+
op := inp.OutPoint()
exp := wire.OutPoint{
Hash: reSignedHash,
@@ -1138,14 +1236,6 @@ func TestHtlcTimeoutSecondStageSweeper(t *testing.T) {
return fmt.Errorf("wrong outpoint swept")
}
- // Notify about the spend, which should resolve
- // the resolver.
- ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
- SpendingTx: sweepTx,
- SpenderTxHash: &sweepHash,
- SpendingHeight: 14,
- }
-
return nil
},
@@ -1155,7 +1245,6 @@ func TestHtlcTimeoutSecondStageSweeper(t *testing.T) {
incubating: true,
resolved: true,
reports: []*channeldb.ResolverReport{
- firstStage,
secondState,
},
},
@@ -1236,33 +1325,6 @@ func TestHtlcTimeoutSecondStageSweeperRemoteSpend(t *testing.T) {
}
checkpoints := []checkpoint{
- {
- // The output should be given to the sweeper.
- preCheckpoint: func(ctx *htlcResolverTestContext,
- _ bool) error {
-
- resolver := ctx.resolver.(*htlcTimeoutResolver)
- inp := <-resolver.Sweeper.(*mockSweeper).sweptInputs
- op := inp.OutPoint()
- if op != commitOutpoint {
- return fmt.Errorf("outpoint %v swept, "+
- "expected %v", op,
- commitOutpoint)
- }
-
- // Emulate the remote sweeping the output with the preimage.
- // re-signed timeout tx.
- ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
- SpendingTx: spendTx,
- SpenderTxHash: &spendTxHash,
- }
-
- return nil
- },
- // incubating=true is used to signal that the
- // second-level transaction was confirmed.
- incubating: true,
- },
{
// We send a confirmation for our sweep tx to indicate
// that our sweep succeeded.
@@ -1277,6 +1339,7 @@ func TestHtlcTimeoutSecondStageSweeperRemoteSpend(t *testing.T) {
ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
SpendingTx: spendTx,
SpenderTxHash: &spendTxHash,
+ SpentOutPoint: &commitOutpoint,
}
}
@@ -1314,7 +1377,7 @@ func TestHtlcTimeoutSecondStageSweeperRemoteSpend(t *testing.T) {
// After the sweep has confirmed, we expect the
// checkpoint to be resolved, and with the above
// reports.
- incubating: true,
+ incubating: false,
resolved: true,
reports: []*channeldb.ResolverReport{
claim,
@@ -1339,21 +1402,26 @@ func testHtlcTimeout(t *testing.T, resolution lnwallet.OutgoingHtlcResolution,
// for the next portion of the test.
ctx := newHtlcResolverTestContext(t,
func(htlc channeldb.HTLC, cfg ResolverConfig) ContractResolver {
- return &htlcTimeoutResolver{
+ r := &htlcTimeoutResolver{
contractResolverKit: *newContractResolverKit(cfg),
htlc: htlc,
htlcResolution: resolution,
}
+ r.initLogger("htlcTimeoutResolver")
+
+ return r
},
)
checkpointedState := runFromCheckpoint(t, ctx, checkpoints)
+ t.Log("Running resolver to completion after restart")
+
// Now, from every checkpoint created, we re-create the resolver, and
// run the test from that checkpoint.
for i := range checkpointedState {
cp := bytes.NewReader(checkpointedState[i])
- ctx := newHtlcResolverTestContext(t,
+ ctx := newHtlcResolverTestContextFromReader(t,
func(htlc channeldb.HTLC, cfg ResolverConfig) ContractResolver {
resolver, err := newTimeoutResolverFromReader(cp, cfg)
if err != nil {
@@ -1361,7 +1429,8 @@ func testHtlcTimeout(t *testing.T, resolution lnwallet.OutgoingHtlcResolution,
}
resolver.Supplement(htlc)
- resolver.htlcResolution = resolution
+ resolver.initLogger("htlcTimeoutResolver")
+
return resolver
},
)
diff --git a/contractcourt/mock_registry_test.go b/contractcourt/mock_registry_test.go
index 5c75185623..1af857b4b2 100644
--- a/contractcourt/mock_registry_test.go
+++ b/contractcourt/mock_registry_test.go
@@ -29,6 +29,11 @@ func (r *mockRegistry) NotifyExitHopHtlc(payHash lntypes.Hash,
wireCustomRecords lnwire.CustomRecords,
payload invoices.Payload) (invoices.HtlcResolution, error) {
+ // Exit early if the notification channel is nil.
+ if hodlChan == nil {
+ return r.notifyResolution, r.notifyErr
+ }
+
r.notifyChan <- notifyExitHopData{
hodlChan: hodlChan,
payHash: payHash,
diff --git a/contractcourt/utxonursery.go b/contractcourt/utxonursery.go
index b7b4d33a8b..a920699a7b 100644
--- a/contractcourt/utxonursery.go
+++ b/contractcourt/utxonursery.go
@@ -793,7 +793,7 @@ func (u *UtxoNursery) graduateClass(classHeight uint32) error {
return err
}
- utxnLog.Infof("Attempting to graduate height=%v: num_kids=%v, "+
+ utxnLog.Debugf("Attempting to graduate height=%v: num_kids=%v, "+
"num_babies=%v", classHeight, len(kgtnOutputs), len(cribOutputs))
// Offer the outputs to the sweeper and set up notifications that will
diff --git a/docs/release-notes/release-notes-0.19.0.md b/docs/release-notes/release-notes-0.19.0.md
index aef92daba9..c9e8c38018 100644
--- a/docs/release-notes/release-notes-0.19.0.md
+++ b/docs/release-notes/release-notes-0.19.0.md
@@ -51,6 +51,12 @@
* Make sure the RPC clients used to access the chain backend are [properly
shutdown](https://github.com/lightningnetwork/lnd/pull/9261).
+* [Fixed a bug](https://github.com/lightningnetwork/lnd/pull/9275) where the
+ peer may block the shutdown process of lnd.
+
+* [Fixed a case](https://github.com/lightningnetwork/lnd/pull/9258) where the
+ confirmation notification may be missed.
+
# New Features
## Functional Enhancements
## RPC Additions
@@ -196,5 +202,5 @@ The underlying functionality between those two options remain the same.
* Oliver Gugger
* Pins
* Viktor Tigerström
+* Yong Yu
* Ziggie
-
diff --git a/fn/goroutine_manager.go b/fn/goroutine_manager.go
index 8c9ad8b2d0..81c538ea01 100644
--- a/fn/goroutine_manager.go
+++ b/fn/goroutine_manager.go
@@ -2,13 +2,9 @@ package fn
import (
"context"
- "errors"
"sync"
)
-// ErrStopping is returned when trying to add a new goroutine while stopping.
-var ErrStopping = errors.New("can not add goroutine, stopping")
-
// GoroutineManager is used to launch goroutines until context expires or the
// manager is stopped. The Stop method blocks until all started goroutines stop.
type GoroutineManager struct {
@@ -29,8 +25,10 @@ func NewGoroutineManager(ctx context.Context) *GoroutineManager {
}
}
-// Go starts a new goroutine if the manager is not stopping.
-func (g *GoroutineManager) Go(f func(ctx context.Context)) error {
+// Go tries to start a new goroutine and returns a boolean indicating its
+// success. It fails iff the goroutine manager is stopping or its context passed
+// to NewGoroutineManager has expired.
+func (g *GoroutineManager) Go(f func(ctx context.Context)) bool {
// Calling wg.Add(1) and wg.Wait() when wg's counter is 0 is a race
// condition, since it is not clear should Wait() block or not. This
// kind of race condition is detected by Go runtime and results in a
@@ -43,7 +41,7 @@ func (g *GoroutineManager) Go(f func(ctx context.Context)) error {
defer g.mu.Unlock()
if g.ctx.Err() != nil {
- return ErrStopping
+ return false
}
g.wg.Add(1)
@@ -52,7 +50,7 @@ func (g *GoroutineManager) Go(f func(ctx context.Context)) error {
f(g.ctx)
}()
- return nil
+ return true
}
// Stop prevents new goroutines from being added and waits for all running
@@ -66,7 +64,7 @@ func (g *GoroutineManager) Stop() {
// safe, since it can't run in parallel with wg.Add(1) call in Go, since
// we just cancelled the context and even if Go call starts running here
// after acquiring the mutex, it would see that the context has expired
- // and return ErrStopping instead of calling wg.Add(1).
+ // and return false instead of calling wg.Add(1).
g.wg.Wait()
}
diff --git a/fn/goroutine_manager_test.go b/fn/goroutine_manager_test.go
index d06a62b4a2..1fc945b97b 100644
--- a/fn/goroutine_manager_test.go
+++ b/fn/goroutine_manager_test.go
@@ -2,6 +2,7 @@ package fn
import (
"context"
+ "sync"
"testing"
"time"
@@ -19,7 +20,7 @@ func TestGoroutineManager(t *testing.T) {
taskChan := make(chan struct{})
- require.NoError(t, m.Go(func(ctx context.Context) {
+ require.True(t, m.Go(func(ctx context.Context) {
<-taskChan
}))
@@ -37,7 +38,7 @@ func TestGoroutineManager(t *testing.T) {
require.Greater(t, stopDelay, time.Second)
// Make sure new goroutines do not start after Stop.
- require.ErrorIs(t, m.Go(func(ctx context.Context) {}), ErrStopping)
+ require.False(t, m.Go(func(ctx context.Context) {}))
// When Stop() is called, the internal context expires and m.Done() is
// closed. Test this.
@@ -56,7 +57,7 @@ func TestGoroutineManagerContextExpires(t *testing.T) {
m := NewGoroutineManager(ctx)
- require.NoError(t, m.Go(func(ctx context.Context) {
+ require.True(t, m.Go(func(ctx context.Context) {
<-ctx.Done()
}))
@@ -79,7 +80,7 @@ func TestGoroutineManagerContextExpires(t *testing.T) {
}
// Make sure new goroutines do not start after context expiry.
- require.ErrorIs(t, m.Go(func(ctx context.Context) {}), ErrStopping)
+ require.False(t, m.Go(func(ctx context.Context) {}))
// Stop will wait for all goroutines to stop.
m.Stop()
@@ -107,11 +108,11 @@ func TestGoroutineManagerStress(t *testing.T) {
// implementation, this test crashes under `-race`.
for i := 0; i < 100; i++ {
taskChan := make(chan struct{})
- err := m.Go(func(ctx context.Context) {
+ ok := m.Go(func(ctx context.Context) {
close(taskChan)
})
// If goroutine was started, wait for its completion.
- if err == nil {
+ if ok {
<-taskChan
}
}
@@ -119,3 +120,38 @@ func TestGoroutineManagerStress(t *testing.T) {
// Wait for Stop to complete.
<-stopChan
}
+
+// TestGoroutineManagerStopsStress launches many Stop() calls in parallel with a
+// task exiting. It attempts to catch a race condition between wg.Done() and
+// wg.Wait() calls. According to documentation of wg.Wait() this is acceptable,
+// therefore this test passes even with -race.
+func TestGoroutineManagerStopsStress(t *testing.T) {
+ t.Parallel()
+
+ m := NewGoroutineManager(context.Background())
+
+ // jobChan is used to make the task to finish.
+ jobChan := make(chan struct{})
+
+ // Start a task and wait inside it until we start calling Stop() method.
+ ok := m.Go(func(ctx context.Context) {
+ <-jobChan
+ })
+ require.True(t, ok)
+
+ // Now launch many gorotines calling Stop() method in parallel.
+ var wg sync.WaitGroup
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ m.Stop()
+ }()
+ }
+
+ // Exit the task in parallel with Stop() calls.
+ close(jobChan)
+
+ // Wait until all the Stop() calls complete.
+ wg.Wait()
+}
diff --git a/htlcswitch/switch.go b/htlcswitch/switch.go
index cbc2a16dae..95ce149354 100644
--- a/htlcswitch/switch.go
+++ b/htlcswitch/switch.go
@@ -1605,7 +1605,7 @@ out:
}
}
- log.Infof("Received outside contract resolution, "+
+ log.Debugf("Received outside contract resolution, "+
"mapping to: %v", spew.Sdump(pkt))
// We don't check the error, as the only failure we can
@@ -2995,7 +2995,9 @@ func (s *Switch) handlePacketSettle(packet *htlcPacket) error {
// to lookup the origin.
circuit, err := s.closeCircuit(packet)
if err != nil {
- return err
+ if !errors.Is(err, ErrCircuitClosing) {
+ return err
+ }
}
// closeCircuit returns a nil circuit when a settle packet returns an
diff --git a/invoices/invoiceregistry.go b/invoices/invoiceregistry.go
index 9d54b6ad8d..93142b3d92 100644
--- a/invoices/invoiceregistry.go
+++ b/invoices/invoiceregistry.go
@@ -1806,6 +1806,12 @@ func (i *InvoiceRegistry) notifyHodlSubscribers(htlcResolution HtlcResolution) {
func (i *InvoiceRegistry) hodlSubscribe(subscriber chan<- interface{},
circuitKey CircuitKey) {
+ // If the caller decides to not subscribe to the resolution, we can
+ // exit early.
+ if subscriber == nil {
+ return
+ }
+
i.hodlSubscriptionsMux.Lock()
defer i.hodlSubscriptionsMux.Unlock()
diff --git a/itest/config.go b/itest/config.go
new file mode 100644
index 0000000000..98e6336528
--- /dev/null
+++ b/itest/config.go
@@ -0,0 +1,28 @@
+//go:build integration
+
+package itest
+
+import (
+ "os/exec"
+
+ "github.com/lightningnetwork/lnd/lntest"
+ "github.com/stretchr/testify/require"
+)
+
+// testDebuglevelShow tests that "lnd --debuglevel=show" command works and
+// prints the list of supported subsystems.
+func testDebuglevelShow(ht *lntest.HarnessTest) {
+ // We can't use ht.NewNode, because it adds more arguments to the
+ // command line (e.g. flags configuring bitcoin backend), but we want to
+ // make sure that "lnd --debuglevel=show" works without any other flags.
+ lndBinary := getLndBinary(ht.T)
+ cmd := exec.Command(lndBinary, "--debuglevel=show")
+ stdoutStderrBytes, err := cmd.CombinedOutput()
+ require.NoError(ht, err, "failed to run 'lnd --debuglevel=show'")
+
+ // Make sure that the output contains the list of supported subsystems
+ // and that the list is not empty. We search PEER subsystem.
+ stdoutStderr := string(stdoutStderrBytes)
+ require.Contains(ht, stdoutStderr, "Supported subsystems")
+ require.Contains(ht, stdoutStderr, "PEER")
+}
diff --git a/itest/list_exclude_test.go b/itest/list_exclude_test.go
new file mode 100644
index 0000000000..1b08ba6f15
--- /dev/null
+++ b/itest/list_exclude_test.go
@@ -0,0 +1,96 @@
+//go:build integration
+
+package itest
+
+import (
+ "fmt"
+
+ "github.com/lightningnetwork/lnd/fn"
+ "github.com/lightningnetwork/lnd/lntest"
+)
+
+// excludedTestsWindows is a list of tests that are flaky on Windows and should
+// be excluded from the test suite atm.
+//
+// TODO(yy): fix these tests and remove them from this list.
+var excludedTestsWindows = []string{
+ "batch channel funding",
+ "zero conf channel open",
+ "open channel with unstable utxos",
+ "funding flow persistence",
+
+ "listsweeps",
+ "sweep htlcs",
+ "sweep cpfp anchor incoming timeout",
+ "payment succeeded htlc remote swept",
+ "3rd party anchor spend",
+
+ "send payment amp",
+ "async payments benchmark",
+ "async bidirectional payments",
+
+ "multihop htlc aggregation leased",
+ "multihop htlc aggregation leased zero conf",
+ "multihop htlc aggregation anchor",
+ "multihop htlc aggregation anchor zero conf",
+ "multihop htlc aggregation simple taproot",
+ "multihop htlc aggregation simple taproot zero conf",
+
+ "channel force closure anchor",
+ "channel force closure simple taproot",
+ "channel backup restore force close",
+ "wipe forwarding packages",
+
+ "coop close with htlcs",
+ "coop close with external delivery",
+
+ "forward interceptor restart",
+ "forward interceptor dedup htlcs",
+ "invoice HTLC modifier basic",
+ "lookup htlc resolution",
+
+ "remote signer taproot",
+ "remote signer account import",
+ "remote signer bump fee",
+ "remote signer funding input types",
+ "remote signer funding async payments taproot",
+ "remote signer funding async payments",
+ "remote signer random seed",
+ "remote signer verify msg",
+ "remote signer channel open",
+ "remote signer shared key",
+ "remote signer psbt",
+ "remote signer sign output raw",
+
+ "on chain to blinded",
+ "query blinded route",
+
+ "data loss protection",
+}
+
+// filterWindowsFlakyTests filters out the flaky tests that are excluded from
+// the test suite on Windows.
+func filterWindowsFlakyTests() []*lntest.TestCase {
+ filteredTestCases := make([]*lntest.TestCase, 0, len(allTestCases))
+
+ excludedSet := fn.NewSet(excludedTestsWindows...)
+ for _, tc := range allTestCases {
+ if excludedSet.Contains(tc.Name) {
+ excludedSet.Remove(tc.Name)
+
+ continue
+ }
+
+ filteredTestCases = append(filteredTestCases, tc)
+ }
+
+ if excludedSet.IsEmpty() {
+ return filteredTestCases
+ }
+
+ for _, name := range excludedSet.ToSlice() {
+ fmt.Println("Test not found in test suite:", name)
+ }
+
+ panic("excluded tests not found in test suite")
+}
diff --git a/itest/list_on_test.go b/itest/list_on_test.go
index c5fa7cebf3..deb05cd527 100644
--- a/itest/list_on_test.go
+++ b/itest/list_on_test.go
@@ -2,37 +2,23 @@
package itest
-import "github.com/lightningnetwork/lnd/lntest"
+import (
+ "github.com/lightningnetwork/lnd/lntest"
+)
var allTestCases = []*lntest.TestCase{
{
Name: "update channel status",
TestFunc: testUpdateChanStatus,
},
- {
- Name: "basic funding flow",
- TestFunc: testBasicChannelFunding,
- },
- {
- Name: "multi hop receiver chain claim",
- TestFunc: testMultiHopReceiverChainClaim,
- },
{
Name: "external channel funding",
TestFunc: testExternalFundingChanPoint,
},
- {
- Name: "channel backup restore basic",
- TestFunc: testChannelBackupRestoreBasic,
- },
{
Name: "channel backup restore unconfirmed",
TestFunc: testChannelBackupRestoreUnconfirmed,
},
- {
- Name: "channel backup restore commit types",
- TestFunc: testChannelBackupRestoreCommitTypes,
- },
{
Name: "channel backup restore force close",
TestFunc: testChannelBackupRestoreForceClose,
@@ -153,18 +139,6 @@ var allTestCases = []*lntest.TestCase{
Name: "addpeer config",
TestFunc: testAddPeerConfig,
},
- {
- Name: "multi hop htlc local timeout",
- TestFunc: testMultiHopHtlcLocalTimeout,
- },
- {
- Name: "multi hop local force close on-chain htlc timeout",
- TestFunc: testMultiHopLocalForceCloseOnChainHtlcTimeout,
- },
- {
- Name: "multi hop remote force close on-chain htlc timeout",
- TestFunc: testMultiHopRemoteForceCloseOnChainHtlcTimeout,
- },
{
Name: "private channel update policy",
TestFunc: testUpdateChannelPolicyForPrivateChannel,
@@ -226,11 +200,15 @@ var allTestCases = []*lntest.TestCase{
TestFunc: testChannelUnsettledBalance,
},
{
- Name: "channel force closure",
- TestFunc: testChannelForceClosure,
+ Name: "channel force closure anchor",
+ TestFunc: testChannelForceClosureAnchor,
+ },
+ {
+ Name: "channel force closure simple taproot",
+ TestFunc: testChannelForceClosureSimpleTaproot,
},
{
- Name: "failing link",
+ Name: "failing channel",
TestFunc: testFailingChannel,
},
{
@@ -293,10 +271,6 @@ var allTestCases = []*lntest.TestCase{
Name: "open channel reorg test",
TestFunc: testOpenChannelAfterReorg,
},
- {
- Name: "psbt channel funding",
- TestFunc: testPsbtChanFunding,
- },
{
Name: "sign psbt",
TestFunc: testSignPsbt,
@@ -313,18 +287,6 @@ var allTestCases = []*lntest.TestCase{
Name: "REST API",
TestFunc: testRestAPI,
},
- {
- Name: "multi hop htlc local chain claim",
- TestFunc: testMultiHopHtlcLocalChainClaim,
- },
- {
- Name: "multi hop htlc remote chain claim",
- TestFunc: testMultiHopHtlcRemoteChainClaim,
- },
- {
- Name: "multi hop htlc aggregation",
- TestFunc: testMultiHopHtlcAggregation,
- },
{
Name: "revoked uncooperative close retribution",
TestFunc: testRevokedCloseRetribution,
@@ -338,10 +300,6 @@ var allTestCases = []*lntest.TestCase{
Name: "revoked uncooperative close retribution remote hodl",
TestFunc: testRevokedCloseRetributionRemoteHodl,
},
- {
- Name: "single-hop send to route",
- TestFunc: testSingleHopSendToRoute,
- },
{
Name: "multi-hop send to route",
TestFunc: testMultiHopSendToRoute,
@@ -478,10 +436,6 @@ var allTestCases = []*lntest.TestCase{
Name: "option scid alias",
TestFunc: testOptionScidAlias,
},
- {
- Name: "scid alias channel update",
- TestFunc: testUpdateChannelPolicyScidAlias,
- },
{
Name: "scid alias upgrade",
TestFunc: testOptionScidUpgrade,
@@ -515,16 +469,20 @@ var allTestCases = []*lntest.TestCase{
TestFunc: testBumpForceCloseFee,
},
{
- Name: "taproot",
- TestFunc: testTaproot,
+ Name: "taproot spend",
+ TestFunc: testTaprootSpend,
},
{
- Name: "simple taproot channel activation",
- TestFunc: testSimpleTaprootChannelActivation,
+ Name: "taproot musig2",
+ TestFunc: testTaprootMuSig2,
+ },
+ {
+ Name: "taproot import scripts",
+ TestFunc: testTaprootImportScripts,
},
{
- Name: "wallet import account",
- TestFunc: testWalletImportAccount,
+ Name: "simple taproot channel activation",
+ TestFunc: testSimpleTaprootChannelActivation,
},
{
Name: "wallet import pubkey",
@@ -534,10 +492,6 @@ var allTestCases = []*lntest.TestCase{
Name: "async payments benchmark",
TestFunc: testAsyncPayments,
},
- {
- Name: "remote signer",
- TestFunc: testRemoteSigner,
- },
{
Name: "taproot coop close",
TestFunc: testTaprootCoopClose,
@@ -550,10 +504,6 @@ var allTestCases = []*lntest.TestCase{
Name: "trackpayments compatible",
TestFunc: testTrackPaymentsCompatible,
},
- {
- Name: "open channel fee policy",
- TestFunc: testOpenChannelUpdateFeePolicy,
- },
{
Name: "custom message",
TestFunc: testCustomMessage,
@@ -575,12 +525,16 @@ var allTestCases = []*lntest.TestCase{
TestFunc: testLookupHtlcResolution,
},
{
- Name: "watchtower",
- TestFunc: testWatchtower,
+ Name: "channel fundmax error",
+ TestFunc: testChannelFundMaxError,
+ },
+ {
+ Name: "channel fundmax wallet amount",
+ TestFunc: testChannelFundMaxWalletAmount,
},
{
- Name: "channel fundmax",
- TestFunc: testChannelFundMax,
+ Name: "channel fundmax anchor reserve",
+ TestFunc: testChannelFundMaxAnchorReserve,
},
{
Name: "htlc timeout resolver extract preimage remote",
@@ -595,12 +549,12 @@ var allTestCases = []*lntest.TestCase{
TestFunc: testCustomFeatures,
},
{
- Name: "utxo selection funding",
- TestFunc: testChannelUtxoSelection,
+ Name: "update pending open channels on funder side",
+ TestFunc: testUpdateOnFunderPendingOpenChannels,
},
{
- Name: "update pending open channels",
- TestFunc: testUpdateOnPendingOpenChannels,
+ Name: "update pending open channels on fundee side",
+ TestFunc: testUpdateOnFundeePendingOpenChannels,
},
{
Name: "blinded payment htlc re-forward",
@@ -694,6 +648,10 @@ var allTestCases = []*lntest.TestCase{
Name: "payment failed htlc local swept",
TestFunc: testPaymentFailedHTLCLocalSwept,
},
+ {
+ Name: "payment failed htlc local swept resumed",
+ TestFunc: testPaymentFailedHTLCLocalSweptResumed,
+ },
{
Name: "payment succeeded htlc remote swept",
TestFunc: testPaymentSucceededHTLCRemoteSwept,
@@ -702,4 +660,32 @@ var allTestCases = []*lntest.TestCase{
Name: "send to route failed htlc timeout",
TestFunc: testSendToRouteFailHTLCTimeout,
},
+ {
+ Name: "send to route failed htlc timeout resumed",
+ TestFunc: testSendToRouteFailHTLCTimeoutResumed,
+ },
+ {
+ Name: "debuglevel show",
+ TestFunc: testDebuglevelShow,
+ },
+}
+
+func init() {
+ // Register subtests.
+ allTestCases = append(allTestCases, multiHopForceCloseTestCases...)
+ allTestCases = append(allTestCases, watchtowerTestCases...)
+ allTestCases = append(allTestCases, psbtFundingTestCases...)
+ allTestCases = append(allTestCases, remoteSignerTestCases...)
+ allTestCases = append(allTestCases, channelRestoreTestCases...)
+ allTestCases = append(allTestCases, fundUtxoSelectionTestCases...)
+ allTestCases = append(allTestCases, zeroConfPolicyTestCases...)
+ allTestCases = append(allTestCases, channelFeePolicyTestCases...)
+ allTestCases = append(allTestCases, walletImportAccountTestCases...)
+ allTestCases = append(allTestCases, basicFundingTestCases...)
+ allTestCases = append(allTestCases, sendToRouteTestCases...)
+
+ // If this is Windows, we'll skip running some of the flaky tests.
+ if isWindowsOS() {
+ allTestCases = filterWindowsFlakyTests()
+ }
}
diff --git a/itest/lnd_channel_backup_test.go b/itest/lnd_channel_backup_test.go
index 5ae0df9584..6c41cd5577 100644
--- a/itest/lnd_channel_backup_test.go
+++ b/itest/lnd_channel_backup_test.go
@@ -23,6 +23,90 @@ import (
"github.com/stretchr/testify/require"
)
+// channelRestoreTestCases contains the test cases for the channel restore
+// scenario.
+var channelRestoreTestCases = []*lntest.TestCase{
+ {
+ // Restore the backup from the on-disk file, using the RPC
+ // interface, for anchor commitment channels.
+ Name: "channel backup restore anchor",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ runChanRestoreScenarioCommitTypes(
+ ht, lnrpc.CommitmentType_ANCHORS, false,
+ )
+ },
+ },
+ {
+ // Restore the backup from the on-disk file, using the RPC
+ // interface, for script-enforced leased channels.
+ Name: "channel backup restore leased",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ runChanRestoreScenarioCommitTypes(
+ ht, leasedType, false,
+ )
+ },
+ },
+ {
+ // Restore the backup from the on-disk file, using the RPC
+ // interface, for zero-conf anchor channels.
+ Name: "channel backup restore anchor zero conf",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ runChanRestoreScenarioCommitTypes(
+ ht, lnrpc.CommitmentType_ANCHORS, true,
+ )
+ },
+ },
+ {
+ // Restore the backup from the on-disk file, using the RPC
+ // interface for a zero-conf script-enforced leased channel.
+ Name: "channel backup restore leased zero conf",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ runChanRestoreScenarioCommitTypes(
+ ht, leasedType, true,
+ )
+ },
+ },
+ {
+ // Restore a channel back up of a taproot channel that was
+ // confirmed.
+ Name: "channel backup restore simple taproot",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ runChanRestoreScenarioCommitTypes(
+ ht, lnrpc.CommitmentType_SIMPLE_TAPROOT, false,
+ )
+ },
+ },
+ {
+ // Restore a channel back up of an unconfirmed taproot channel.
+ Name: "channel backup restore simple taproot zero conf",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ runChanRestoreScenarioCommitTypes(
+ ht, lnrpc.CommitmentType_SIMPLE_TAPROOT, true,
+ )
+ },
+ },
+ {
+ Name: "channel backup restore from rpc",
+ TestFunc: testChannelBackupRestoreFromRPC,
+ },
+ {
+ Name: "channel backup restore from file",
+ TestFunc: testChannelBackupRestoreFromFile,
+ },
+ {
+ Name: "channel backup restore during creation",
+ TestFunc: testChannelBackupRestoreDuringCreation,
+ },
+ {
+ Name: "channel backup restore during unlock",
+ TestFunc: testChannelBackupRestoreDuringUnlock,
+ },
+ {
+ Name: "channel backup restore twice",
+ TestFunc: testChannelBackupRestoreTwice,
+ },
+}
+
type (
// nodeRestorer is a function closure that allows each test case to
// control exactly *how* the prior node is restored. This might be
@@ -234,202 +318,167 @@ func (c *chanRestoreScenario) testScenario(ht *lntest.HarnessTest,
)
}
-// testChannelBackupRestore tests that we're able to recover from, and initiate
-// the DLP protocol via: the RPC restore command, restoring on unlock, and
-// restoring from initial wallet creation. We'll also alternate between
-// restoring form the on disk file, and restoring from the exported RPC command
-// as well.
-func testChannelBackupRestoreBasic(ht *lntest.HarnessTest) {
- var testCases = []struct {
- name string
- restoreMethod restoreMethodType
- }{
- // Restore from backups obtained via the RPC interface. Dave
- // was the initiator, of the non-advertised channel.
- {
- name: "restore from RPC backup",
- restoreMethod: func(st *lntest.HarnessTest,
- oldNode *node.HarnessNode,
- backupFilePath string,
- password []byte,
- mnemonic []string) nodeRestorer {
-
- // For this restoration method, we'll grab the
- // current multi-channel backup from the old
- // node, and use it to restore a new node
- // within the closure.
- chanBackup := oldNode.RPC.ExportAllChanBackups()
-
- multi := chanBackup.MultiChanBackup.
- MultiChanBackup
-
- // In our nodeRestorer function, we'll restore
- // the node from seed, then manually recover
- // the channel backup.
- return chanRestoreViaRPC(
- st, password, mnemonic, multi,
- )
- },
- },
+// testChannelBackupRestoreFromRPC tests that we're able to recover from, and
+// initiate the DLP protocol via the RPC restore command.
+func testChannelBackupRestoreFromRPC(ht *lntest.HarnessTest) {
+ // Restore from backups obtained via the RPC interface. Dave was the
+ // initiator, of the non-advertised channel.
+ restoreMethod := func(st *lntest.HarnessTest, oldNode *node.HarnessNode,
+ backupFilePath string, password []byte,
+ mnemonic []string) nodeRestorer {
- // Restore the backup from the on-disk file, using the RPC
- // interface.
- {
- name: "restore from backup file",
- restoreMethod: func(st *lntest.HarnessTest,
- oldNode *node.HarnessNode,
- backupFilePath string,
- password []byte,
- mnemonic []string) nodeRestorer {
-
- // Read the entire Multi backup stored within
- // this node's channel.backup file.
- multi, err := os.ReadFile(backupFilePath)
- require.NoError(st, err)
-
- // Now that we have Dave's backup file, we'll
- // create a new nodeRestorer that will restore
- // using the on-disk channel.backup.
- return chanRestoreViaRPC(
- st, password, mnemonic, multi,
- )
- },
- },
+ // For this restoration method, we'll grab the current
+ // multi-channel backup from the old node, and use it to
+ // restore a new node within the closure.
+ chanBackup := oldNode.RPC.ExportAllChanBackups()
- // Restore the backup as part of node initialization with the
- // prior mnemonic and new backup seed.
- {
- name: "restore during creation",
- restoreMethod: func(st *lntest.HarnessTest,
- oldNode *node.HarnessNode,
- backupFilePath string,
- password []byte,
- mnemonic []string) nodeRestorer {
-
- // First, fetch the current backup state as is,
- // to obtain our latest Multi.
- chanBackup := oldNode.RPC.ExportAllChanBackups()
- backupSnapshot := &lnrpc.ChanBackupSnapshot{
- MultiChanBackup: chanBackup.
- MultiChanBackup,
- }
+ multi := chanBackup.MultiChanBackup.
+ MultiChanBackup
- // Create a new nodeRestorer that will restore
- // the node using the Multi backup we just
- // obtained above.
- return func() *node.HarnessNode {
- return st.RestoreNodeWithSeed(
- "dave", nil, password, mnemonic,
- "", revocationWindow,
- backupSnapshot,
- )
- }
- },
- },
+ // In our nodeRestorer function, we'll restore the node from
+ // seed, then manually recover the channel backup.
+ return chanRestoreViaRPC(
+ st, password, mnemonic, multi,
+ )
+ }
- // Restore the backup once the node has already been
- // re-created, using the Unlock call.
- {
- name: "restore during unlock",
- restoreMethod: func(st *lntest.HarnessTest,
- oldNode *node.HarnessNode,
- backupFilePath string,
- password []byte,
- mnemonic []string) nodeRestorer {
-
- // First, fetch the current backup state as is,
- // to obtain our latest Multi.
- chanBackup := oldNode.RPC.ExportAllChanBackups()
- backupSnapshot := &lnrpc.ChanBackupSnapshot{
- MultiChanBackup: chanBackup.
- MultiChanBackup,
- }
+ runChanRestoreScenarioBasic(ht, restoreMethod)
+}
- // Create a new nodeRestorer that will restore
- // the node with its seed, but no channel
- // backup, shutdown this initialized node, then
- // restart it again using Unlock.
- return func() *node.HarnessNode {
- newNode := st.RestoreNodeWithSeed(
- "dave", nil, password, mnemonic,
- "", revocationWindow, nil,
- )
- st.RestartNodeWithChanBackups(
- newNode, backupSnapshot,
- )
-
- return newNode
- }
- },
- },
+// testChannelBackupRestoreFromFile tests that we're able to recover from, and
+// initiate the DLP protocol via the backup file.
+func testChannelBackupRestoreFromFile(ht *lntest.HarnessTest) {
+ // Restore the backup from the on-disk file, using the RPC interface.
+ restoreMethod := func(st *lntest.HarnessTest, oldNode *node.HarnessNode,
+ backupFilePath string, password []byte,
+ mnemonic []string) nodeRestorer {
- // Restore the backup from the on-disk file a second time to
- // make sure imports can be canceled and later resumed.
- {
- name: "restore from backup file twice",
- restoreMethod: func(st *lntest.HarnessTest,
- oldNode *node.HarnessNode,
- backupFilePath string,
- password []byte,
- mnemonic []string) nodeRestorer {
-
- // Read the entire Multi backup stored within
- // this node's channel.backup file.
- multi, err := os.ReadFile(backupFilePath)
- require.NoError(st, err)
-
- // Now that we have Dave's backup file, we'll
- // create a new nodeRestorer that will restore
- // using the on-disk channel.backup.
- //
- //nolint:lll
- backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
- MultiChanBackup: multi,
- }
+ // Read the entire Multi backup stored within this node's
+ // channel.backup file.
+ multi, err := os.ReadFile(backupFilePath)
+ require.NoError(st, err)
+
+ // Now that we have Dave's backup file, we'll create a new
+ // nodeRestorer that will restore using the on-disk
+ // channel.backup.
+ return chanRestoreViaRPC(
+ st, password, mnemonic, multi,
+ )
+ }
- return func() *node.HarnessNode {
- newNode := st.RestoreNodeWithSeed(
- "dave", nil, password, mnemonic,
- "", revocationWindow, nil,
- )
+ runChanRestoreScenarioBasic(ht, restoreMethod)
+}
- req := &lnrpc.RestoreChanBackupRequest{
- Backup: backup,
- }
- res := newNode.RPC.RestoreChanBackups(
- req,
- )
- require.EqualValues(
- st, 1, res.NumRestored,
- )
-
- req = &lnrpc.RestoreChanBackupRequest{
- Backup: backup,
- }
- res = newNode.RPC.RestoreChanBackups(
- req,
- )
- require.EqualValues(
- st, 0, res.NumRestored,
- )
-
- return newNode
- }
- },
- },
+// testChannelBackupRestoreFromFile tests that we're able to recover from, and
+// initiate the DLP protocol via restoring from initial wallet creation.
+func testChannelBackupRestoreDuringCreation(ht *lntest.HarnessTest) {
+ // Restore the backup as part of node initialization with the prior
+ // mnemonic and new backup seed.
+ restoreMethod := func(st *lntest.HarnessTest, oldNode *node.HarnessNode,
+ backupFilePath string, password []byte,
+ mnemonic []string) nodeRestorer {
+
+ // First, fetch the current backup state as is, to obtain our
+ // latest Multi.
+ chanBackup := oldNode.RPC.ExportAllChanBackups()
+ backupSnapshot := &lnrpc.ChanBackupSnapshot{
+ MultiChanBackup: chanBackup.
+ MultiChanBackup,
+ }
+
+ // Create a new nodeRestorer that will restore the node using
+ // the Multi backup we just obtained above.
+ return func() *node.HarnessNode {
+ return st.RestoreNodeWithSeed(
+ "dave", nil, password, mnemonic,
+ "", revocationWindow,
+ backupSnapshot,
+ )
+ }
+ }
+
+ runChanRestoreScenarioBasic(ht, restoreMethod)
+}
+
+// testChannelBackupRestoreFromFile tests that we're able to recover from, and
+// initiate the DLP protocol via restoring on unlock.
+func testChannelBackupRestoreDuringUnlock(ht *lntest.HarnessTest) {
+ // Restore the backup once the node has already been re-created, using
+ // the Unlock call.
+ restoreMethod := func(st *lntest.HarnessTest, oldNode *node.HarnessNode,
+ backupFilePath string, password []byte,
+ mnemonic []string) nodeRestorer {
+
+ // First, fetch the current backup state as is, to obtain our
+ // latest Multi.
+ chanBackup := oldNode.RPC.ExportAllChanBackups()
+ backupSnapshot := &lnrpc.ChanBackupSnapshot{
+ MultiChanBackup: chanBackup.
+ MultiChanBackup,
+ }
+
+ // Create a new nodeRestorer that will restore the node with
+ // its seed, but no channel backup, shutdown this initialized
+ // node, then restart it again using Unlock.
+ return func() *node.HarnessNode {
+ newNode := st.RestoreNodeWithSeed(
+ "dave", nil, password, mnemonic,
+ "", revocationWindow, nil,
+ )
+ st.RestartNodeWithChanBackups(
+ newNode, backupSnapshot,
+ )
+
+ return newNode
+ }
}
- for _, testCase := range testCases {
- tc := testCase
- success := ht.Run(tc.name, func(t *testing.T) {
- h := ht.Subtest(t)
+ runChanRestoreScenarioBasic(ht, restoreMethod)
+}
+
+// testChannelBackupRestoreTwice tests that we're able to recover from, and
+// initiate the DLP protocol twice by alternating between restoring form the on
+// disk file, and restoring from the exported RPC command
+func testChannelBackupRestoreTwice(ht *lntest.HarnessTest) {
+ // Restore the backup from the on-disk file a second time to make sure
+ // imports can be canceled and later resumed.
+ restoreMethod := func(st *lntest.HarnessTest, oldNode *node.HarnessNode,
+ backupFilePath string, password []byte,
+ mnemonic []string) nodeRestorer {
+
+ // Read the entire Multi backup stored within this node's
+ // channel.backup file.
+ multi, err := os.ReadFile(backupFilePath)
+ require.NoError(st, err)
+
+ // Now that we have Dave's backup file, we'll create a new
+ // nodeRestorer that will restore using the on-disk
+ // channel.backup.
+ backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
+ MultiChanBackup: multi,
+ }
+
+ return func() *node.HarnessNode {
+ newNode := st.RestoreNodeWithSeed(
+ "dave", nil, password, mnemonic,
+ "", revocationWindow, nil,
+ )
+
+ req := &lnrpc.RestoreChanBackupRequest{
+ Backup: backup,
+ }
+ newNode.RPC.RestoreChanBackups(req)
+
+ req = &lnrpc.RestoreChanBackupRequest{
+ Backup: backup,
+ }
+ newNode.RPC.RestoreChanBackups(req)
- runChanRestoreScenarioBasic(h, tc.restoreMethod)
- })
- if !success {
- break
+ return newNode
}
}
+
+ runChanRestoreScenarioBasic(ht, restoreMethod)
}
// runChanRestoreScenarioBasic executes a given test case from end to end,
@@ -540,79 +589,6 @@ func runChanRestoreScenarioUnConfirmed(ht *lntest.HarnessTest, useFile bool) {
crs.testScenario(ht, restoredNodeFunc)
}
-// testChannelBackupRestoreCommitTypes tests that we're able to recover from,
-// and initiate the DLP protocol for different channel commitment types and
-// zero-conf channel.
-func testChannelBackupRestoreCommitTypes(ht *lntest.HarnessTest) {
- var testCases = []struct {
- name string
- ct lnrpc.CommitmentType
- zeroConf bool
- }{
- // Restore the backup from the on-disk file, using the RPC
- // interface, for anchor commitment channels.
- {
- name: "restore from backup file anchors",
- ct: lnrpc.CommitmentType_ANCHORS,
- },
-
- // Restore the backup from the on-disk file, using the RPC
- // interface, for script-enforced leased channels.
- {
- name: "restore from backup file script " +
- "enforced lease",
- ct: lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE,
- },
-
- // Restore the backup from the on-disk file, using the RPC
- // interface, for zero-conf anchor channels.
- {
- name: "restore from backup file for zero-conf " +
- "anchors channel",
- ct: lnrpc.CommitmentType_ANCHORS,
- zeroConf: true,
- },
-
- // Restore the backup from the on-disk file, using the RPC
- // interface for a zero-conf script-enforced leased channel.
- {
- name: "restore from backup file zero-conf " +
- "script-enforced leased channel",
- ct: lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE,
- zeroConf: true,
- },
-
- // Restore a channel back up of a taproot channel that was
- // confirmed.
- {
- name: "restore from backup taproot",
- ct: lnrpc.CommitmentType_SIMPLE_TAPROOT,
- zeroConf: false,
- },
-
- // Restore a channel back up of an unconfirmed taproot channel.
- {
- name: "restore from backup taproot zero conf",
- ct: lnrpc.CommitmentType_SIMPLE_TAPROOT,
- zeroConf: true,
- },
- }
-
- for _, testCase := range testCases {
- tc := testCase
- success := ht.Run(tc.name, func(t *testing.T) {
- h := ht.Subtest(t)
-
- runChanRestoreScenarioCommitTypes(
- h, tc.ct, tc.zeroConf,
- )
- })
- if !success {
- break
- }
- }
-}
-
// runChanRestoreScenarioCommitTypes tests that the DLP is applied for
// different channel commitment types and zero-conf channel.
func runChanRestoreScenarioCommitTypes(ht *lntest.HarnessTest,
@@ -844,7 +820,7 @@ func runChanRestoreScenarioForceClose(ht *lntest.HarnessTest, zeroConf bool) {
// and the on-disk channel.backup are updated each time a channel is
// opened/closed.
func testChannelBackupUpdates(ht *lntest.HarnessTest) {
- alice := ht.Alice
+ alice := ht.NewNodeWithCoins("Alice", nil)
// First, we'll make a temp directory that we'll use to store our
// backup file, so we can check in on it during the test easily.
@@ -1052,7 +1028,7 @@ func testExportChannelBackup(ht *lntest.HarnessTest) {
// With Carol up, we'll now connect her to Alice, and open a channel
// between them.
- alice := ht.Alice
+ alice := ht.NewNodeWithCoins("Alice", nil)
ht.ConnectNodes(carol, alice)
// Next, we'll open two channels between Alice and Carol back to back.
@@ -1320,12 +1296,20 @@ func testDataLossProtection(ht *lntest.HarnessTest) {
// information Dave needs to sweep his funds.
require.NoError(ht, restartDave(), "unable to restart Eve")
+ // Mine a block to trigger Dave's chain watcher to process Carol's sweep
+ // tx.
+ //
+ // TODO(yy): remove this block once the blockbeat starts remembering
+ // its last processed block and can handle looking for spends in the
+ // past blocks.
+ ht.MineEmptyBlocks(1)
+
+ // Make sure Dave still has the pending force close channel.
+ ht.AssertNumPendingForceClose(dave, 1)
+
// Dave should have a pending sweep.
ht.AssertNumPendingSweeps(dave, 1)
- // Mine a block to trigger the sweep.
- ht.MineBlocks(1)
-
// Dave should sweep his funds.
ht.AssertNumTxsInMempool(1)
@@ -1482,7 +1466,6 @@ func assertTimeLockSwept(ht *lntest.HarnessTest, carol, dave *node.HarnessNode,
expectedTxes := 1
// Mine a block to trigger the sweeps.
- ht.MineBlocks(1)
ht.AssertNumTxsInMempool(expectedTxes)
// Carol should consider the channel pending force close (since she is
@@ -1512,11 +1495,10 @@ func assertTimeLockSwept(ht *lntest.HarnessTest, carol, dave *node.HarnessNode,
// The commit sweep resolver publishes the sweep tx at defaultCSV-1 and
// we already mined one block after the commitment was published, and
// one block to trigger Carol's sweeps, so take that into account.
- ht.MineEmptyBlocks(1)
+ ht.MineBlocks(2)
ht.AssertNumPendingSweeps(dave, 2)
// Mine a block to trigger the sweeps.
- ht.MineEmptyBlocks(1)
daveSweep := ht.AssertNumTxsInMempool(1)[0]
block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
ht.AssertTxInBlock(block, daveSweep)
@@ -1615,8 +1597,6 @@ func assertDLPExecuted(ht *lntest.HarnessTest,
// output and the other for her anchor.
ht.AssertNumPendingSweeps(carol, 2)
- // Mine a block to trigger the sweep.
- ht.MineEmptyBlocks(1)
ht.MineBlocksAndAssertNumTxes(1, 1)
// Now the channel should be fully closed also from Carol's POV.
@@ -1635,8 +1615,6 @@ func assertDLPExecuted(ht *lntest.HarnessTest,
// output and the other for his anchor.
ht.AssertNumPendingSweeps(dave, 2)
- // Mine a block to trigger the sweep.
- ht.MineEmptyBlocks(1)
ht.MineBlocksAndAssertNumTxes(1, 1)
// Now Dave should consider the channel fully closed.
@@ -1652,10 +1630,6 @@ func assertDLPExecuted(ht *lntest.HarnessTest,
ht.AssertNumPendingSweeps(dave, 1)
}
- // Mine one block to trigger the sweeper to sweep.
- ht.MineEmptyBlocks(1)
- blocksMined++
-
// Expect one tx - the commitment sweep from Dave. For anchor
// channels, we expect the two anchor sweeping txns to be
// failed due they are uneconomical.
@@ -1673,9 +1647,6 @@ func assertDLPExecuted(ht *lntest.HarnessTest,
// commitmment was published, so take that into account.
ht.MineEmptyBlocks(int(defaultCSV - blocksMined))
- // Mine one block to trigger the sweeper to sweep.
- ht.MineEmptyBlocks(1)
-
// Carol should have two pending sweeps:
// 1. her commit output.
// 2. her anchor output, if this is anchor channel.
diff --git a/itest/lnd_channel_balance_test.go b/itest/lnd_channel_balance_test.go
index 72dd16ea34..f723b0492d 100644
--- a/itest/lnd_channel_balance_test.go
+++ b/itest/lnd_channel_balance_test.go
@@ -48,7 +48,8 @@ func testChannelBalance(ht *lntest.HarnessTest) {
}
// Before beginning, make sure alice and bob are connected.
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
ht.EnsureConnected(alice, bob)
chanPoint := ht.OpenChannel(
@@ -62,10 +63,6 @@ func testChannelBalance(ht *lntest.HarnessTest) {
// Ensure Bob currently has no available balance within the channel.
checkChannelBalance(bob, 0, amount-lntest.CalcStaticFee(cType, 0))
-
- // Finally close the channel between Alice and Bob, asserting that the
- // channel has been properly closed on-chain.
- ht.CloseChannel(alice, chanPoint)
}
// testChannelUnsettledBalance will test that the UnsettledBalance field
@@ -118,7 +115,7 @@ func testChannelUnsettledBalance(ht *lntest.HarnessTest) {
carol := ht.NewNode("Carol", []string{"--hodl.exit-settle"})
// Connect Alice to Carol.
- alice := ht.Alice
+ alice := ht.NewNodeWithCoins("Alice", nil)
ht.ConnectNodes(alice, carol)
// Open a channel between Alice and Carol.
@@ -156,7 +153,7 @@ func testChannelUnsettledBalance(ht *lntest.HarnessTest) {
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
}
- alice.RPC.SendPayment(req)
+ ht.SendPaymentAssertInflight(alice, req)
}()
}
@@ -207,7 +204,4 @@ func testChannelUnsettledBalance(ht *lntest.HarnessTest) {
// balance that equals to the amount of invoices * payAmt. The local
// balance remains zero.
checkChannelBalance(carol, 0, aliceLocal, numInvoices*payAmt, 0)
-
- // Force and assert the channel closure.
- ht.ForceCloseChannel(alice, chanPointAlice)
}
diff --git a/itest/lnd_channel_force_close_test.go b/itest/lnd_channel_force_close_test.go
index 6d02804012..34a56a6a46 100644
--- a/itest/lnd_channel_force_close_test.go
+++ b/itest/lnd_channel_force_close_test.go
@@ -3,115 +3,97 @@ package itest
import (
"bytes"
"fmt"
- "testing"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd"
- "github.com/lightningnetwork/lnd/chainreg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/node"
"github.com/lightningnetwork/lnd/lntest/wait"
- "github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/lightningnetwork/lnd/routing"
"github.com/stretchr/testify/require"
)
-// testChannelForceClosure performs a test to exercise the behavior of "force"
-// closing a channel or unilaterally broadcasting the latest local commitment
-// state on-chain. The test creates a new channel between Alice and Carol, then
-// force closes the channel after some cursory assertions. Within the test, a
-// total of 3 + n transactions will be broadcast, representing the commitment
-// transaction, a transaction sweeping the local CSV delayed output, a
-// transaction sweeping the CSV delayed 2nd-layer htlcs outputs, and n
-// htlc timeout transactions, where n is the number of payments Alice attempted
-// to send to Carol. This test includes several restarts to ensure that the
-// transaction output states are persisted throughout the forced closure
-// process.
-//
-// TODO(roasbeef): also add an unsettled HTLC before force closing.
-func testChannelForceClosure(ht *lntest.HarnessTest) {
- // We'll test the scenario for some of the commitment types, to ensure
- // outputs can be swept.
- commitTypes := []lnrpc.CommitmentType{
- lnrpc.CommitmentType_ANCHORS,
- lnrpc.CommitmentType_SIMPLE_TAPROOT,
+const pushAmt = btcutil.Amount(5e5)
+
+// testChannelForceClosureAnchor runs `runChannelForceClosureTest` with anchor
+// channels.
+func testChannelForceClosureAnchor(ht *lntest.HarnessTest) {
+ // Create a simple network: Alice -> Carol, using anchor channels.
+ //
+ // Prepare params.
+ openChannelParams := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ PushAmt: pushAmt,
+ CommitmentType: lnrpc.CommitmentType_ANCHORS,
}
- for _, channelType := range commitTypes {
- testName := fmt.Sprintf("committype=%v", channelType)
-
- channelType := channelType
- success := ht.Run(testName, func(t *testing.T) {
- st := ht.Subtest(t)
-
- args := lntest.NodeArgsForCommitType(channelType)
- alice := st.NewNode("Alice", args)
- defer st.Shutdown(alice)
-
- // Since we'd like to test failure scenarios with
- // outstanding htlcs, we'll introduce another node into
- // our test network: Carol.
- carolArgs := []string{"--hodl.exit-settle"}
- carolArgs = append(carolArgs, args...)
- carol := st.NewNode("Carol", carolArgs)
- defer st.Shutdown(carol)
-
- // Each time, we'll send Alice new set of coins in
- // order to fund the channel.
- st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
-
- // NOTE: Alice needs 3 more UTXOs to sweep her
- // second-layer txns after a restart - after a restart
- // all the time-sensitive sweeps are swept immediately
- // without being aggregated.
- //
- // TODO(yy): remove this once the can recover its state
- // from restart.
- st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
- st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
- st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
- st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
-
- // Also give Carol some coins to allow her to sweep her
- // anchor.
- st.FundCoins(btcutil.SatoshiPerBitcoin, carol)
-
- channelForceClosureTest(st, alice, carol, channelType)
- })
- if !success {
- return
- }
+ cfg := node.CfgAnchor
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfgCarol}
+
+ runChannelForceClosureTest(ht, cfgs, openChannelParams)
+}
+
+// testChannelForceClosureSimpleTaproot runs `runChannelForceClosureTest` with
+// simple taproot channels.
+func testChannelForceClosureSimpleTaproot(ht *lntest.HarnessTest) {
+ // Create a simple network: Alice -> Carol, using simple taproot
+ // channels.
+ //
+ // Prepare params.
+ openChannelParams := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ PushAmt: pushAmt,
+ // If the channel is a taproot channel, then we'll need to
+ // create a private channel.
+ //
+ // TODO(roasbeef): lift after G175
+ CommitmentType: lnrpc.CommitmentType_SIMPLE_TAPROOT,
+ Private: true,
}
+
+ cfg := node.CfgSimpleTaproot
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfgCarol}
+
+ runChannelForceClosureTest(ht, cfgs, openChannelParams)
}
-func channelForceClosureTest(ht *lntest.HarnessTest,
- alice, carol *node.HarnessNode, channelType lnrpc.CommitmentType) {
+// runChannelForceClosureTest performs a test to exercise the behavior of
+// "force" closing a channel or unilaterally broadcasting the latest local
+// commitment state on-chain. The test creates a new channel between Alice and
+// Carol, then force closes the channel after some cursory assertions. Within
+// the test, a total of 3 + n transactions will be broadcast, representing the
+// commitment transaction, a transaction sweeping the local CSV delayed output,
+// a transaction sweeping the CSV delayed 2nd-layer htlcs outputs, and n htlc
+// timeout transactions, where n is the number of payments Alice attempted
+// to send to Carol. This test includes several restarts to ensure that the
+// transaction output states are persisted throughout the forced closure
+// process.
+func runChannelForceClosureTest(ht *lntest.HarnessTest,
+ cfgs [][]string, params lntest.OpenChannelParams) {
const (
- chanAmt = btcutil.Amount(10e6)
- pushAmt = btcutil.Amount(5e6)
- paymentAmt = 100000
- numInvoices = 6
+ numInvoices = 6
+ commitFeeRate = 20000
)
- const commitFeeRate = 20000
ht.SetFeeEstimate(commitFeeRate)
- // TODO(roasbeef): should check default value in config here
- // instead, or make delay a param
- defaultCLTV := uint32(chainreg.DefaultBitcoinTimeLockDelta)
-
- // We must let Alice have an open channel before she can send a node
- // announcement, so we open a channel with Carol,
- ht.ConnectNodes(alice, carol)
+ // Create a three hop network: Alice -> Carol.
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
+ alice, carol := nodes[0], nodes[1]
+ chanPoint := chanPoints[0]
// We need one additional UTXO for sweeping the remote anchor.
- ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
+ if ht.IsNeutrinoBackend() {
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
+ }
// Before we start, obtain Carol's current wallet balance, we'll check
// to ensure that at the end of the force closure by Alice, Carol
@@ -119,24 +101,6 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
carolBalResp := carol.RPC.WalletBalance()
carolStartingBalance := carolBalResp.ConfirmedBalance
- // If the channel is a taproot channel, then we'll need to create a
- // private channel.
- //
- // TODO(roasbeef): lift after G175
- var privateChan bool
- if channelType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- privateChan = true
- }
-
- chanPoint := ht.OpenChannel(
- alice, carol, lntest.OpenChannelParams{
- Private: privateChan,
- Amt: chanAmt,
- PushAmt: pushAmt,
- CommitmentType: channelType,
- },
- )
-
// Send payments from Alice to Carol, since Carol is htlchodl mode, the
// htlc outputs should be left unsettled, and should be swept by the
// utxo nursery.
@@ -146,11 +110,11 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
Dest: carolPubKey,
Amt: int64(paymentAmt),
PaymentHash: ht.Random32Bytes(),
- FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta,
+ FinalCltvDelta: finalCltvDelta,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
}
- alice.RPC.SendPayment(req)
+ ht.SendPaymentAssertInflight(alice, req)
}
// Once the HTLC has cleared, all the nodes n our mini network should
@@ -163,13 +127,13 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
curHeight := int32(ht.CurrentHeight())
// Using the current height of the chain, derive the relevant heights
- // for incubating two-stage htlcs.
+ // for sweeping two-stage htlcs.
var (
startHeight = uint32(curHeight)
commCsvMaturityHeight = startHeight + 1 + defaultCSV
- htlcExpiryHeight = padCLTV(startHeight + defaultCLTV)
+ htlcExpiryHeight = padCLTV(startHeight + finalCltvDelta)
htlcCsvMaturityHeight = padCLTV(
- startHeight + defaultCLTV + 1 + defaultCSV,
+ startHeight + finalCltvDelta + 1 + defaultCSV,
)
)
@@ -200,21 +164,15 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
)
// The several restarts in this test are intended to ensure that when a
- // channel is force-closed, the UTXO nursery has persisted the state of
- // the channel in the closure process and will recover the correct
+ // channel is force-closed, the contract court has persisted the state
+ // of the channel in the closure process and will recover the correct
// state when the system comes back on line. This restart tests state
// persistence at the beginning of the process, when the commitment
// transaction has been broadcast but not yet confirmed in a block.
ht.RestartNode(alice)
- // To give the neutrino backend some time to catch up with the chain,
- // we wait here until we have enough UTXOs to actually sweep the local
- // and remote anchor.
- const expectedUtxos = 6
- ht.AssertNumUTXOs(alice, expectedUtxos)
-
// We expect to see Alice's force close tx in the mempool.
- ht.GetNumTxsFromMempool(1)
+ ht.AssertNumTxsInMempool(1)
// Mine a block which should confirm the commitment transaction
// broadcast as a result of the force closure. Once mined, we also
@@ -259,46 +217,34 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// The following restart is intended to ensure that outputs from the
// force close commitment transaction have been persisted once the
- // transaction has been confirmed, but before the outputs are spendable
- // (the "kindergarten" bucket.)
+ // transaction has been confirmed, but before the outputs are
+ // spendable.
ht.RestartNode(alice)
// Carol should offer her commit and anchor outputs to the sweeper.
sweepTxns := ht.AssertNumPendingSweeps(carol, 2)
- // Find Carol's anchor sweep.
+ // Identify Carol's pending sweeps.
var carolAnchor, carolCommit = sweepTxns[0], sweepTxns[1]
if carolAnchor.AmountSat != uint32(anchorSize) {
carolAnchor, carolCommit = carolCommit, carolAnchor
}
- // Mine a block to trigger Carol's sweeper to make decisions on the
- // anchor sweeping.
- ht.MineEmptyBlocks(1)
-
// Carol's sweep tx should be in the mempool already, as her output is
- // not timelocked.
+ // not timelocked. This sweep tx should spend her to_local output as
+ // the anchor output is not economical to spend.
carolTx := ht.GetNumTxsFromMempool(1)[0]
- // Carol's sweeping tx should have 2-input-1-output shape.
- require.Len(ht, carolTx.TxIn, 2)
+ // Carol's sweeping tx should have 1-input-1-output shape.
+ require.Len(ht, carolTx.TxIn, 1)
require.Len(ht, carolTx.TxOut, 1)
// Calculate the total fee Carol paid.
totalFeeCarol := ht.CalculateTxFee(carolTx)
- // If we have anchors, add an anchor resolution for carol.
- op := fmt.Sprintf("%v:%v", carolAnchor.Outpoint.TxidStr,
- carolAnchor.Outpoint.OutputIndex)
- carolReports[op] = &lnrpc.Resolution{
- ResolutionType: lnrpc.ResolutionType_ANCHOR,
- Outcome: lnrpc.ResolutionOutcome_CLAIMED,
- SweepTxid: carolTx.TxHash().String(),
- AmountSat: anchorSize,
- Outpoint: carolAnchor.Outpoint,
- }
-
- op = fmt.Sprintf("%v:%v", carolCommit.Outpoint.TxidStr,
+ // Carol's anchor report won't be created since it's uneconomical to
+ // sweep. So we expect to see only the commit sweep report.
+ op := fmt.Sprintf("%v:%v", carolCommit.Outpoint.TxidStr,
carolCommit.Outpoint.OutputIndex)
carolReports[op] = &lnrpc.Resolution{
ResolutionType: lnrpc.ResolutionType_COMMIT,
@@ -320,9 +266,9 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// Alice should still have the anchor sweeping request.
ht.AssertNumPendingSweeps(alice, 1)
- // The following restart checks to ensure that outputs in the
- // kindergarten bucket are persisted while waiting for the required
- // number of confirmations to be reported.
+ // The following restart checks to ensure that outputs in the contract
+ // court are persisted while waiting for the required number of
+ // confirmations to be reported.
ht.RestartNode(alice)
// Alice should see the channel in her set of pending force closed
@@ -345,12 +291,12 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
aliceBalance = forceClose.Channel.LocalBalance
// At this point, the nursery should show that the commitment
- // output has 2 block left before its CSV delay expires. In
+ // output has 3 block left before its CSV delay expires. In
// total, we have mined exactly defaultCSV blocks, so the htlc
// outputs should also reflect that this many blocks have
// passed.
err = checkCommitmentMaturity(
- forceClose, commCsvMaturityHeight, 2,
+ forceClose, commCsvMaturityHeight, 3,
)
if err != nil {
return err
@@ -369,9 +315,9 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
}, defaultTimeout)
require.NoError(ht, err, "timeout while checking force closed channel")
- // Generate an additional block, which should cause the CSV delayed
- // output from the commitment txn to expire.
- ht.MineEmptyBlocks(1)
+ // Generate two blocks, which should cause the CSV delayed output from
+ // the commitment txn to expire.
+ ht.MineBlocks(2)
// At this point, the CSV will expire in the next block, meaning that
// the output should be offered to the sweeper.
@@ -381,14 +327,9 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
commitSweep, anchorSweep = anchorSweep, commitSweep
}
- // Restart Alice to ensure that she resumes watching the finalized
- // commitment sweep txid.
- ht.RestartNode(alice)
-
// Mine one block and the sweeping transaction should now be broadcast.
// So we fetch the node's mempool to ensure it has been properly
// broadcast.
- ht.MineEmptyBlocks(1)
sweepingTXID := ht.AssertNumTxsInMempool(1)[0]
// Fetch the sweep transaction, all input it's spending should be from
@@ -399,7 +340,28 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
"sweep transaction not spending from commit")
}
- // We expect a resolution which spends our commit output.
+ // For neutrino backend, due to it has no mempool, we need to check the
+ // sweep tx has already been saved to db before restarting. This is due
+ // to the possible race,
+ // - the fee bumper returns a TxPublished event, which is received by
+ // the sweeper and the sweep tx is saved to db.
+ // - the sweeper receives a shutdown signal before it receives the
+ // above event.
+ //
+ // TODO(yy): fix the above race.
+ if ht.IsNeutrinoBackend() {
+ // Check that we can find the commitment sweep in our set of
+ // known sweeps, using the simple transaction id ListSweeps
+ // output.
+ ht.AssertSweepFound(alice, sweepingTXID.String(), false, 0)
+ }
+
+ // Restart Alice to ensure that she resumes watching the finalized
+ // commitment sweep txid.
+ ht.RestartNode(alice)
+
+ // Alice's anchor report won't be created since it's uneconomical to
+ // sweep. We expect a resolution which spends our commit output.
op = fmt.Sprintf("%v:%v", commitSweep.Outpoint.TxidStr,
commitSweep.Outpoint.OutputIndex)
aliceReports[op] = &lnrpc.Resolution{
@@ -410,17 +372,6 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
AmountSat: uint64(aliceBalance),
}
- // Add alice's anchor to our expected set of reports.
- op = fmt.Sprintf("%v:%v", aliceAnchor.Outpoint.TxidStr,
- aliceAnchor.Outpoint.OutputIndex)
- aliceReports[op] = &lnrpc.Resolution{
- ResolutionType: lnrpc.ResolutionType_ANCHOR,
- Outcome: lnrpc.ResolutionOutcome_CLAIMED,
- SweepTxid: sweepingTXID.String(),
- Outpoint: aliceAnchor.Outpoint,
- AmountSat: uint64(anchorSize),
- }
-
// Check that we can find the commitment sweep in our set of known
// sweeps, using the simple transaction id ListSweeps output.
ht.AssertSweepFound(alice, sweepingTXID.String(), false, 0)
@@ -490,17 +441,13 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// Advance the blockchain until just before the CLTV expires, nothing
// exciting should have happened during this time.
- ht.MineEmptyBlocks(cltvHeightDelta)
+ ht.MineBlocks(cltvHeightDelta)
// We now restart Alice, to ensure that she will broadcast the
// presigned htlc timeout txns after the delay expires after
// experiencing a while waiting for the htlc outputs to incubate.
ht.RestartNode(alice)
- // To give the neutrino backend some time to catch up with the chain,
- // we wait here until we have enough UTXOs to
- // ht.AssertNumUTXOs(alice, expectedUtxos)
-
// Alice should now see the channel in her set of pending force closed
// channels with one pending HTLC.
err = wait.NoError(func() error {
@@ -535,24 +482,23 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// Now, generate the block which will cause Alice to offer the
// presigned htlc timeout txns to the sweeper.
- ht.MineEmptyBlocks(1)
+ ht.MineBlocks(1)
// Since Alice had numInvoices (6) htlcs extended to Carol before force
// closing, we expect Alice to broadcast an htlc timeout txn for each
- // one.
- ht.AssertNumPendingSweeps(alice, numInvoices)
+ // one. We also expect Alice to still have her anchor since it's not
+ // swept.
+ ht.AssertNumPendingSweeps(alice, numInvoices+1)
// Wait for them all to show up in the mempool
- //
- // NOTE: after restart, all the htlc timeout txns will be offered to
- // the sweeper with `Immediate` set to true, so they won't be
- // aggregated.
- htlcTxIDs := ht.AssertNumTxsInMempool(numInvoices)
+ htlcTxIDs := ht.AssertNumTxsInMempool(1)
// Retrieve each htlc timeout txn from the mempool, and ensure it is
- // well-formed. This entails verifying that each only spends from
- // output, and that output is from the commitment txn.
- numInputs := 2
+ // well-formed. The sweeping tx should spend all the htlc outputs.
+ //
+ // NOTE: We also add 1 output as the outgoing HTLC is swept using twice
+ // its value as its budget, so a wallet utxo is used.
+ numInputs := 6 + 1
// Construct a map of the already confirmed htlc timeout outpoints,
// that will count the number of times each is spent by the sweep txn.
@@ -561,6 +507,8 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
var htlcTxOutpointSet = make(map[wire.OutPoint]int)
var htlcLessFees uint64
+
+ //nolint:lll
for _, htlcTxID := range htlcTxIDs {
// Fetch the sweep transaction, all input it's spending should
// be from the commitment transaction which was broadcast
@@ -653,10 +601,10 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// Generate a block that mines the htlc timeout txns. Doing so now
// activates the 2nd-stage CSV delayed outputs.
- ht.MineBlocksAndAssertNumTxes(1, numInvoices)
+ ht.MineBlocksAndAssertNumTxes(1, 1)
- // Alice is restarted here to ensure that she promptly moved the crib
- // outputs to the kindergarten bucket after the htlc timeout txns were
+ // Alice is restarted here to ensure that her contract court properly
+ // handles the 2nd-stage sweeps after the htlc timeout txns were
// confirmed.
ht.RestartNode(alice)
@@ -665,12 +613,17 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
currentHeight = int32(ht.CurrentHeight())
ht.Logf("current height: %v, htlcCsvMaturityHeight=%v", currentHeight,
htlcCsvMaturityHeight)
- numBlocks := int(htlcCsvMaturityHeight - uint32(currentHeight) - 2)
- ht.MineEmptyBlocks(numBlocks)
+ numBlocks := int(htlcCsvMaturityHeight - uint32(currentHeight) - 1)
+ ht.MineBlocks(numBlocks)
- // Restart Alice to ensure that she can recover from a failure before
- // having graduated the htlc outputs in the kindergarten bucket.
- ht.RestartNode(alice)
+ // Restart Alice to ensure that she can recover from a failure.
+ //
+ // TODO(yy): Skip this step for neutrino as it cannot recover the
+ // sweeping txns from the mempool. We need to also store the txns in
+ // the sweeper store to make it work for the neutrino case.
+ if !ht.IsNeutrinoBackend() {
+ ht.RestartNode(alice)
+ }
// Now that the channel has been fully swept, it should no longer show
// incubated, check to see that Alice's node still reports the channel
@@ -688,55 +641,13 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
}, defaultTimeout)
require.NoError(ht, err, "timeout while checking force closed channel")
- // Generate a block that causes Alice to sweep the htlc outputs in the
- // kindergarten bucket.
- ht.MineEmptyBlocks(1)
- ht.AssertNumPendingSweeps(alice, numInvoices)
-
- // Mine a block to trigger the sweep.
- ht.MineEmptyBlocks(1)
-
- // A temp hack to ensure the CI is not blocking the current
- // development. There's a known issue in block sync among different
- // subsystems, which is scheduled to be fixed in 0.18.1.
- if ht.IsNeutrinoBackend() {
- // We expect the htlcs to be aggregated into one tx. However,
- // due to block sync issue, they may end up in two txns. Here
- // we assert that there are two txns found in the mempool - if
- // succeeded, it means the aggregation failed, and we won't
- // continue the test.
- //
- // NOTE: we don't check `len(mempool) == 1` because it will
- // give us false positive.
- err := wait.NoError(func() error {
- mempool := ht.Miner().GetRawMempool()
- if len(mempool) == 2 {
- return nil
- }
-
- return fmt.Errorf("expected 2 txes in mempool, found "+
- "%d", len(mempool))
- }, lntest.DefaultTimeout)
- ht.Logf("Assert num of txns got %v", err)
-
- // If there are indeed two txns found in the mempool, we won't
- // continue the test.
- if err == nil {
- ht.Log("Neutrino backend failed to aggregate htlc " +
- "sweeps!")
-
- // Clean the mempool.
- ht.MineBlocksAndAssertNumTxes(1, 2)
-
- return
- }
- }
+ ht.AssertNumPendingSweeps(alice, numInvoices+1)
// Wait for the single sweep txn to appear in the mempool.
- htlcSweepTxID := ht.AssertNumTxsInMempool(1)[0]
+ htlcSweepTxid := ht.AssertNumTxsInMempool(1)[0]
// Fetch the htlc sweep transaction from the mempool.
- htlcSweepTx := ht.GetRawTransaction(htlcSweepTxID)
+ htlcSweepTx := ht.GetRawTransaction(htlcSweepTxid)
// Ensure the htlc sweep transaction only has one input for each htlc
// Alice extended before force closing.
@@ -748,6 +659,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// Ensure that each output spends from exactly one htlc timeout output.
for _, txIn := range htlcSweepTx.MsgTx().TxIn {
outpoint := txIn.PreviousOutPoint
+
// Check that the input is a confirmed htlc timeout txn.
_, ok := htlcTxOutpointSet[outpoint]
require.Truef(ht, ok, "htlc sweep output not spending from "+
@@ -785,11 +697,11 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// Check that we can find the htlc sweep in our set of sweeps using
// the verbose output of the listsweeps output.
- ht.AssertSweepFound(alice, htlcSweepTx.Hash().String(), true, 0)
+ ht.AssertSweepFound(alice, htlcSweepTxid.String(), true, 0)
- // The following restart checks to ensure that the nursery store is
- // storing the txid of the previously broadcast htlc sweep txn, and
- // that it begins watching that txid after restarting.
+ // The following restart checks to ensure that the sweeper is storing
+ // the txid of the previously broadcast htlc sweep txn, and that it
+ // begins watching that txid after restarting.
ht.RestartNode(alice)
// Now that the channel has been fully swept, it should no longer show
@@ -805,7 +717,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
}
err = checkPendingHtlcStageAndMaturity(
- forceClose, 2, htlcCsvMaturityHeight-1, -1,
+ forceClose, 2, htlcCsvMaturityHeight-1, 0,
)
if err != nil {
return err
@@ -818,7 +730,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// Generate the final block that sweeps all htlc funds into the user's
// wallet, and make sure the sweep is in this block.
block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
- ht.AssertTxInBlock(block, htlcSweepTxID)
+ ht.AssertTxInBlock(block, htlcSweepTxid)
// Now that the channel has been fully swept, it should no longer show
// up within the pending channels RPC.
@@ -847,12 +759,6 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
carolExpectedBalance := btcutil.Amount(carolStartingBalance) +
pushAmt - totalFeeCarol
- // In addition, if this is an anchor-enabled channel, further add the
- // anchor size.
- if lntest.CommitTypeHasAnchors(channelType) {
- carolExpectedBalance += btcutil.Amount(anchorSize)
- }
-
require.Equal(ht, carolExpectedBalance,
btcutil.Amount(carolBalResp.ConfirmedBalance),
"carol's balance is incorrect")
@@ -873,15 +779,13 @@ func padCLTV(cltv uint32) uint32 {
// in the case where a counterparty tries to settle an HTLC with the wrong
// preimage.
func testFailingChannel(ht *lntest.HarnessTest) {
- const paymentAmt = 10000
-
chanAmt := lnd.MaxFundingAmount
// We'll introduce Carol, which will settle any incoming invoice with a
// totally unrelated preimage.
carol := ht.NewNode("Carol", []string{"--hodl.bogus-settle"})
- alice := ht.Alice
+ alice := ht.NewNodeWithCoins("Alice", nil)
ht.ConnectNodes(alice, carol)
// Let Alice connect and open a channel to Carol,
@@ -893,7 +797,7 @@ func testFailingChannel(ht *lntest.HarnessTest) {
invoice := &lnrpc.Invoice{
Memo: "testing",
RPreimage: preimage,
- Value: paymentAmt,
+ Value: invoiceAmt,
}
resp := carol.RPC.AddInvoice(invoice)
@@ -926,12 +830,12 @@ func testFailingChannel(ht *lntest.HarnessTest) {
// Carol will use the correct preimage to resolve the HTLC on-chain.
ht.AssertNumPendingSweeps(carol, 1)
- // Bring down the fee rate estimation, otherwise the following sweep
- // won't happen.
- ht.SetFeeEstimate(chainfee.FeePerKwFloor)
-
- // Mine a block to trigger Carol's sweeper to broadcast the sweeping
- // tx.
+ // Mine a block to trigger the sweep. This is needed because the
+ // preimage extraction logic from the link is not managed by the
+ // blockbeat, which means the preimage may be sent to the contest
+ // resolver after it's launched.
+ //
+ // TODO(yy): Expose blockbeat to the link layer.
ht.MineEmptyBlocks(1)
// Carol should have broadcast her sweeping tx.
@@ -944,9 +848,6 @@ func testFailingChannel(ht *lntest.HarnessTest) {
// Alice's should have one pending sweep request for her commit output.
ht.AssertNumPendingSweeps(alice, 1)
- // Mine a block to trigger the sweep.
- ht.MineEmptyBlocks(1)
-
// Mine Alice's sweeping tx.
ht.MineBlocksAndAssertNumTxes(1, 1)
diff --git a/itest/lnd_channel_funding_fund_max_test.go b/itest/lnd_channel_funding_fund_max_test.go
index 43aec3c982..5a19ccdebd 100644
--- a/itest/lnd_channel_funding_fund_max_test.go
+++ b/itest/lnd_channel_funding_fund_max_test.go
@@ -50,17 +50,14 @@ type chanFundMaxTestCase struct {
private bool
}
-// testChannelFundMax checks various channel funding scenarios where the user
-// instructed the wallet to use all remaining funds.
-func testChannelFundMax(ht *lntest.HarnessTest) {
+// testChannelFundMaxError checks various error channel funding scenarios where
+// the user instructed the wallet to use all remaining funds.
+func testChannelFundMaxError(ht *lntest.HarnessTest) {
// Create two new nodes that open a channel between each other for these
// tests.
args := lntest.NodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)
alice := ht.NewNode("Alice", args)
- defer ht.Shutdown(alice)
-
bob := ht.NewNode("Bob", args)
- defer ht.Shutdown(bob)
// Ensure both sides are connected so the funding flow can be properly
// executed.
@@ -95,22 +92,6 @@ func testChannelFundMax(ht *lntest.HarnessTest) {
expectedErrStr: "available funds(0.00017877 BTC) " +
"below the minimum amount(0.00020000 BTC)",
},
- {
- name: "wallet amount > min chan " +
- "size (37000sat)",
- initialWalletBalance: 37_000,
- // The transaction fee to open the channel must be
- // subtracted from Alice's balance.
- // (since wallet balance < max-chan-size)
- expectedBalanceAlice: btcutil.Amount(37_000) -
- fundingFee(1, false),
- },
- {
- name: "wallet amount > max chan size " +
- "(20000000sat)",
- initialWalletBalance: 20_000_000,
- expectedBalanceAlice: lnd.MaxFundingAmount,
- },
// Expects, that if the maximum funding amount for a channel is
// pushed to the remote side, then the funding flow is failing
// because the push amount has to be less than the local channel
@@ -140,6 +121,63 @@ func testChannelFundMax(ht *lntest.HarnessTest) {
expectedErrStr: "funder balance too small (-8050000) " +
"with fee=9050 sat, minimum=708 sat required",
},
+ }
+
+ for _, testCase := range testCases {
+ success := ht.Run(
+ testCase.name, func(tt *testing.T) {
+ runFundMaxTestCase(
+ ht, alice, bob, testCase, reserveAmount,
+ )
+ },
+ )
+
+ // Stop at the first failure. Mimic behavior of original test
+ // framework.
+ if !success {
+ break
+ }
+ }
+}
+
+// testChannelFundMaxWalletAmount checks various channel funding scenarios
+// where the user instructed the wallet to use all remaining funds and succeed.
+func testChannelFundMaxWalletAmount(ht *lntest.HarnessTest) {
+ // Create two new nodes that open a channel between each other for these
+ // tests.
+ args := lntest.NodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)
+ alice := ht.NewNode("Alice", args)
+ bob := ht.NewNode("Bob", args)
+
+ // Ensure both sides are connected so the funding flow can be properly
+ // executed.
+ ht.EnsureConnected(alice, bob)
+
+ // Calculate reserve amount for one channel.
+ reserveResp, _ := alice.RPC.WalletKit.RequiredReserve(
+ context.Background(), &walletrpc.RequiredReserveRequest{
+ AdditionalPublicChannels: 1,
+ },
+ )
+ reserveAmount := btcutil.Amount(reserveResp.RequiredReserve)
+
+ var testCases = []*chanFundMaxTestCase{
+ {
+ name: "wallet amount > min chan " +
+ "size (37000sat)",
+ initialWalletBalance: 37_000,
+ // The transaction fee to open the channel must be
+ // subtracted from Alice's balance.
+ // (since wallet balance < max-chan-size)
+ expectedBalanceAlice: btcutil.Amount(37_000) -
+ fundingFee(1, false),
+ },
+ {
+ name: "wallet amount > max chan size " +
+ "(20000000sat)",
+ initialWalletBalance: 20_000_000,
+ expectedBalanceAlice: lnd.MaxFundingAmount,
+ },
{
name: "wallet amount > max chan size, " +
"push amount 16766000",
@@ -147,7 +185,48 @@ func testChannelFundMax(ht *lntest.HarnessTest) {
pushAmt: 16_766_000,
expectedBalanceAlice: lnd.MaxFundingAmount - 16_766_000,
},
+ }
+
+ for _, testCase := range testCases {
+ success := ht.Run(
+ testCase.name, func(tt *testing.T) {
+ runFundMaxTestCase(
+ ht, alice, bob, testCase, reserveAmount,
+ )
+ },
+ )
+ // Stop at the first failure. Mimic behavior of original test
+ // framework.
+ if !success {
+ break
+ }
+ }
+}
+
+// testChannelFundMaxAnchorReserve checks various channel funding scenarios
+// where the user instructed the wallet to use all remaining funds and its
+// impact on anchor reserve.
+func testChannelFundMaxAnchorReserve(ht *lntest.HarnessTest) {
+ // Create two new nodes that open a channel between each other for these
+ // tests.
+ args := lntest.NodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)
+ alice := ht.NewNode("Alice", args)
+ bob := ht.NewNode("Bob", args)
+
+ // Ensure both sides are connected so the funding flow can be properly
+ // executed.
+ ht.EnsureConnected(alice, bob)
+
+ // Calculate reserve amount for one channel.
+ reserveResp, _ := alice.RPC.WalletKit.RequiredReserve(
+ context.Background(), &walletrpc.RequiredReserveRequest{
+ AdditionalPublicChannels: 1,
+ },
+ )
+ reserveAmount := btcutil.Amount(reserveResp.RequiredReserve)
+
+ var testCases = []*chanFundMaxTestCase{
{
name: "anchor reserved value",
initialWalletBalance: 100_000,
@@ -229,13 +308,12 @@ func runFundMaxTestCase(ht *lntest.HarnessTest, alice, bob *node.HarnessNode,
// Otherwise, if we expect to open a channel use the helper function.
chanPoint := ht.OpenChannel(alice, bob, chanParams)
+ cType := ht.GetChannelCommitType(alice, chanPoint)
// Close the channel between Alice and Bob, asserting
// that the channel has been properly closed on-chain.
defer ht.CloseChannel(alice, chanPoint)
- cType := ht.GetChannelCommitType(alice, chanPoint)
-
// Alice's balance should be her amount subtracted by the commitment
// transaction fee.
checkChannelBalance(
diff --git a/itest/lnd_channel_funding_utxo_selection_test.go b/itest/lnd_channel_funding_utxo_selection_test.go
index 2b6d0cd301..7868b73338 100644
--- a/itest/lnd_channel_funding_utxo_selection_test.go
+++ b/itest/lnd_channel_funding_utxo_selection_test.go
@@ -15,6 +15,37 @@ import (
"github.com/stretchr/testify/require"
)
+var fundUtxoSelectionTestCases = []*lntest.TestCase{
+ {
+ Name: "utxo selection funding error",
+ TestFunc: testChannelUtxoSelectionError,
+ },
+ {
+ Name: "utxo selection selected valid chan size",
+ TestFunc: testUtxoSelectionSelectedValidChanSize,
+ },
+ {
+ Name: "utxo selection selected valid chan reserve",
+ TestFunc: testUtxoSelectionSelectedValidChanReserve,
+ },
+ {
+ Name: "utxo selection selected reserve from selected",
+ TestFunc: testUtxoSelectionReserveFromSelected,
+ },
+ {
+ Name: "utxo selection fundmax",
+ TestFunc: testUtxoSelectionFundmax,
+ },
+ {
+ Name: "utxo selection fundmax reserve",
+ TestFunc: testUtxoSelectionFundmaxReserve,
+ },
+ {
+ Name: "utxo selection reused utxo",
+ TestFunc: testUtxoSelectionReuseUTXO,
+ },
+}
+
type chanFundUtxoSelectionTestCase struct {
// name is the name of the target test case.
name string
@@ -57,17 +88,15 @@ type chanFundUtxoSelectionTestCase struct {
reuseUtxo bool
}
-// testChannelUtxoSelection checks various channel funding scenarios where the
-// user instructed the wallet to use a selection funds available in the wallet.
-func testChannelUtxoSelection(ht *lntest.HarnessTest) {
+// testChannelUtxoSelectionError checks various channel funding error scenarios
+// where the user instructed the wallet to use a selection funds available in
+// the wallet.
+func testChannelUtxoSelectionError(ht *lntest.HarnessTest) {
// Create two new nodes that open a channel between each other for these
// tests.
args := lntest.NodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)
alice := ht.NewNode("Alice", args)
- defer ht.Shutdown(alice)
-
bob := ht.NewNode("Bob", args)
- defer ht.Shutdown(bob)
// Ensure both sides are connected so the funding flow can be properly
// executed.
@@ -118,73 +147,6 @@ func testChannelUtxoSelection(ht *lntest.HarnessTest) {
"create funding transaction, need 0.00210337 " +
"BTC only have 0.00100000 BTC available",
},
- // We are spending two selected coins partially out of three
- // available in the wallet and expect a change output and the
- // unselected coin as remaining wallet balance.
- {
- name: "selected, local amount > " +
- "min chan size",
- initialCoins: []btcutil.Amount{
- 200_000, 50_000, 100_000,
- },
- selectedCoins: []btcutil.Amount{
- 200_000, 100_000,
- },
- localAmt: btcutil.Amount(250_000),
- expectedBalance: btcutil.Amount(250_000),
- remainingWalletBalance: btcutil.Amount(350_000) -
- btcutil.Amount(250_000) - fundingFee(2, true),
- },
- // We are spending the entirety of two selected coins out of
- // three available in the wallet and expect no change output and
- // the unselected coin as remaining wallet balance.
- {
- name: "fundmax, local amount > min " +
- "chan size",
- initialCoins: []btcutil.Amount{
- 200_000, 100_000, 50_000,
- },
- selectedCoins: []btcutil.Amount{
- 200_000, 50_000,
- },
- expectedBalance: btcutil.Amount(200_000) +
- btcutil.Amount(50_000) - fundingFee(2, false),
- remainingWalletBalance: btcutil.Amount(100_000),
- },
- // Select all coins in wallet and use the maximum available
- // local amount to fund an anchor channel.
- {
- name: "selected, local amount leaves sufficient " +
- "reserve",
- initialCoins: []btcutil.Amount{
- 200_000, 100_000,
- },
- selectedCoins: []btcutil.Amount{200_000, 100_000},
- commitmentType: lnrpc.CommitmentType_ANCHORS,
- localAmt: btcutil.Amount(300_000) -
- reserveAmount - fundingFee(2, true),
- expectedBalance: btcutil.Amount(300_000) -
- reserveAmount - fundingFee(2, true),
- remainingWalletBalance: reserveAmount,
- },
- // Select all coins in wallet towards local amount except for an
- // anchor reserve portion. Because the UTXOs are sorted by size
- // by default, the reserve amount is just left in the wallet.
- {
- name: "selected, reserve from selected",
- initialCoins: []btcutil.Amount{
- 200_000, reserveAmount, 100_000,
- },
- selectedCoins: []btcutil.Amount{
- 200_000, reserveAmount, 100_000,
- },
- commitmentType: lnrpc.CommitmentType_ANCHORS,
- localAmt: btcutil.Amount(300_000) -
- fundingFee(2, true),
- expectedBalance: btcutil.Amount(300_000) -
- fundingFee(2, true),
- remainingWalletBalance: reserveAmount,
- },
// Select all coins in wallet and use more than the maximum
// available local amount to fund an anchor channel.
{
@@ -203,43 +165,6 @@ func testChannelUtxoSelection(ht *lntest.HarnessTest) {
"insufficient funds for fee bumping anchor " +
"channel closings",
},
- // We fund an anchor channel with a single coin and just keep
- // enough funds in the wallet to cover for the anchor reserve.
- {
- name: "fundmax, sufficient reserve",
- initialCoins: []btcutil.Amount{
- 200_000, reserveAmount,
- },
- selectedCoins: []btcutil.Amount{200_000},
- commitmentType: lnrpc.CommitmentType_ANCHORS,
- expectedBalance: btcutil.Amount(200_000) -
- fundingFee(1, false),
- remainingWalletBalance: reserveAmount,
- },
- // We fund an anchor channel with a single coin and expect the
- // reserve amount left in the wallet.
- {
- name: "fundmax, sufficient reserve from channel " +
- "balance carve out",
- initialCoins: []btcutil.Amount{
- 200_000,
- },
- selectedCoins: []btcutil.Amount{200_000},
- commitmentType: lnrpc.CommitmentType_ANCHORS,
- expectedBalance: btcutil.Amount(200_000) -
- reserveAmount - fundingFee(1, true),
- remainingWalletBalance: reserveAmount,
- },
- // Confirm that already spent outputs can't be reused to fund
- // another channel.
- {
- name: "output already spent",
- initialCoins: []btcutil.Amount{
- 200_000,
- },
- selectedCoins: []btcutil.Amount{200_000},
- reuseUtxo: true,
- },
}
for _, tc := range tcs {
@@ -258,24 +183,258 @@ func testChannelUtxoSelection(ht *lntest.HarnessTest) {
}
}
+// testChannelUtxoSelection checks various channel funding scenarios where the
+// user instructed the wallet to use a selection funds available in the wallet.
+func testUtxoSelectionSelectedValidChanSize(ht *lntest.HarnessTest) {
+ // Create two new nodes that open a channel between each other for these
+ // tests.
+ args := lntest.NodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)
+ alice := ht.NewNode("Alice", args)
+ bob := ht.NewNode("Bob", args)
+
+ // Ensure both sides are connected so the funding flow can be properly
+ // executed.
+ ht.EnsureConnected(alice, bob)
+
+ // Calculate reserve amount for one channel.
+ reserveResp, _ := alice.RPC.WalletKit.RequiredReserve(
+ context.Background(), &walletrpc.RequiredReserveRequest{
+ AdditionalPublicChannels: 1,
+ },
+ )
+
+ reserveAmount := btcutil.Amount(reserveResp.RequiredReserve)
+
+ // We are spending two selected coins partially out of three available
+ // in the wallet and expect a change output and the unselected coin as
+ // remaining wallet balance.
+ tc := &chanFundUtxoSelectionTestCase{
+ name: "selected, local amount > min chan size",
+ initialCoins: []btcutil.Amount{
+ 200_000, 50_000, 100_000,
+ },
+ selectedCoins: []btcutil.Amount{
+ 200_000, 100_000,
+ },
+ localAmt: btcutil.Amount(250_000),
+ expectedBalance: btcutil.Amount(250_000),
+ remainingWalletBalance: btcutil.Amount(350_000) -
+ btcutil.Amount(250_000) - fundingFee(2, true),
+ }
+
+ runUtxoSelectionTestCase(ht, alice, bob, tc, reserveAmount)
+}
+
+// testChannelUtxoSelection checks various channel funding scenarios where the
+// user instructed the wallet to use a selection funds available in the wallet.
+func testUtxoSelectionSelectedValidChanReserve(ht *lntest.HarnessTest) {
+ // Create two new nodes that open a channel between each other for these
+ // tests.
+ args := lntest.NodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)
+ alice := ht.NewNode("Alice", args)
+ bob := ht.NewNode("Bob", args)
+
+ // Ensure both sides are connected so the funding flow can be properly
+ // executed.
+ ht.EnsureConnected(alice, bob)
+
+ // Calculate reserve amount for one channel.
+ reserveResp, _ := alice.RPC.WalletKit.RequiredReserve(
+ context.Background(), &walletrpc.RequiredReserveRequest{
+ AdditionalPublicChannels: 1,
+ },
+ )
+
+ reserveAmount := btcutil.Amount(reserveResp.RequiredReserve)
+
+ // Select all coins in wallet and use the maximum available
+ // local amount to fund an anchor channel.
+ tc := &chanFundUtxoSelectionTestCase{
+ name: "selected, local amount leaves sufficient reserve",
+ initialCoins: []btcutil.Amount{
+ 200_000, 100_000,
+ },
+ selectedCoins: []btcutil.Amount{200_000, 100_000},
+ commitmentType: lnrpc.CommitmentType_ANCHORS,
+ localAmt: btcutil.Amount(300_000) -
+ reserveAmount - fundingFee(2, true),
+ expectedBalance: btcutil.Amount(300_000) -
+ reserveAmount - fundingFee(2, true),
+ remainingWalletBalance: reserveAmount,
+ }
+
+ runUtxoSelectionTestCase(ht, alice, bob, tc, reserveAmount)
+}
+
+// testChannelUtxoSelection checks various channel funding scenarios where the
+// user instructed the wallet to use a selection funds available in the wallet.
+func testUtxoSelectionReserveFromSelected(ht *lntest.HarnessTest) {
+ // Create two new nodes that open a channel between each other for these
+ // tests.
+ args := lntest.NodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)
+ alice := ht.NewNode("Alice", args)
+ bob := ht.NewNode("Bob", args)
+
+ // Ensure both sides are connected so the funding flow can be properly
+ // executed.
+ ht.EnsureConnected(alice, bob)
+
+ // Calculate reserve amount for one channel.
+ reserveResp, _ := alice.RPC.WalletKit.RequiredReserve(
+ context.Background(), &walletrpc.RequiredReserveRequest{
+ AdditionalPublicChannels: 1,
+ },
+ )
+
+ reserveAmount := btcutil.Amount(reserveResp.RequiredReserve)
+
+ // Select all coins in wallet towards local amount except for an anchor
+ // reserve portion. Because the UTXOs are sorted by size by default,
+ // the reserve amount is just left in the wallet.
+ tc := &chanFundUtxoSelectionTestCase{
+ name: "selected, reserve from selected",
+ initialCoins: []btcutil.Amount{
+ 200_000, reserveAmount, 100_000,
+ },
+ selectedCoins: []btcutil.Amount{
+ 200_000, reserveAmount, 100_000,
+ },
+ commitmentType: lnrpc.CommitmentType_ANCHORS,
+ localAmt: btcutil.Amount(300_000) -
+ fundingFee(2, true),
+ expectedBalance: btcutil.Amount(300_000) -
+ fundingFee(2, true),
+ remainingWalletBalance: reserveAmount,
+ }
+
+ runUtxoSelectionTestCase(ht, alice, bob, tc, reserveAmount)
+}
+
+// testChannelUtxoSelection checks various channel funding scenarios where the
+// user instructed the wallet to use a selection funds available in the wallet.
+func testUtxoSelectionFundmax(ht *lntest.HarnessTest) {
+ // Create two new nodes that open a channel between each other for these
+ // tests.
+ args := lntest.NodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)
+ alice := ht.NewNode("Alice", args)
+ bob := ht.NewNode("Bob", args)
+
+ // Ensure both sides are connected so the funding flow can be properly
+ // executed.
+ ht.EnsureConnected(alice, bob)
+
+ // Calculate reserve amount for one channel.
+ reserveResp, _ := alice.RPC.WalletKit.RequiredReserve(
+ context.Background(), &walletrpc.RequiredReserveRequest{
+ AdditionalPublicChannels: 1,
+ },
+ )
+
+ reserveAmount := btcutil.Amount(reserveResp.RequiredReserve)
+
+ // We fund an anchor channel with a single coin and just keep enough
+ // funds in the wallet to cover for the anchor reserve.
+ tc := &chanFundUtxoSelectionTestCase{
+ name: "fundmax, sufficient reserve",
+ initialCoins: []btcutil.Amount{
+ 200_000, reserveAmount,
+ },
+ selectedCoins: []btcutil.Amount{200_000},
+ commitmentType: lnrpc.CommitmentType_ANCHORS,
+ expectedBalance: btcutil.Amount(200_000) -
+ fundingFee(1, false),
+ remainingWalletBalance: reserveAmount,
+ }
+
+ runUtxoSelectionTestCase(ht, alice, bob, tc, reserveAmount)
+}
+
+// testChannelUtxoSelection checks various channel funding scenarios where the
+// user instructed the wallet to use a selection funds available in the wallet.
+func testUtxoSelectionFundmaxReserve(ht *lntest.HarnessTest) {
+ // Create two new nodes that open a channel between each other for these
+ // tests.
+ args := lntest.NodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)
+ alice := ht.NewNode("Alice", args)
+ bob := ht.NewNode("Bob", args)
+
+ // Ensure both sides are connected so the funding flow can be properly
+ // executed.
+ ht.EnsureConnected(alice, bob)
+
+ // Calculate reserve amount for one channel.
+ reserveResp, _ := alice.RPC.WalletKit.RequiredReserve(
+ context.Background(), &walletrpc.RequiredReserveRequest{
+ AdditionalPublicChannels: 1,
+ },
+ )
+
+ reserveAmount := btcutil.Amount(reserveResp.RequiredReserve)
+
+ // We fund an anchor channel with a single coin and expect the reserve
+ // amount left in the wallet.
+ tc := &chanFundUtxoSelectionTestCase{
+ name: "fundmax, sufficient reserve from channel " +
+ "balance carve out",
+ initialCoins: []btcutil.Amount{
+ 200_000,
+ },
+ selectedCoins: []btcutil.Amount{200_000},
+ commitmentType: lnrpc.CommitmentType_ANCHORS,
+ expectedBalance: btcutil.Amount(200_000) -
+ reserveAmount - fundingFee(1, true),
+ remainingWalletBalance: reserveAmount,
+ }
+
+ runUtxoSelectionTestCase(ht, alice, bob, tc, reserveAmount)
+}
+
+// testChannelUtxoSelection checks various channel funding scenarios where the
+// user instructed the wallet to use a selection funds available in the wallet.
+func testUtxoSelectionReuseUTXO(ht *lntest.HarnessTest) {
+ // Create two new nodes that open a channel between each other for these
+ // tests.
+ args := lntest.NodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)
+ alice := ht.NewNode("Alice", args)
+ bob := ht.NewNode("Bob", args)
+
+ // Ensure both sides are connected so the funding flow can be properly
+ // executed.
+ ht.EnsureConnected(alice, bob)
+
+ // Calculate reserve amount for one channel.
+ reserveResp, _ := alice.RPC.WalletKit.RequiredReserve(
+ context.Background(), &walletrpc.RequiredReserveRequest{
+ AdditionalPublicChannels: 1,
+ },
+ )
+
+ reserveAmount := btcutil.Amount(reserveResp.RequiredReserve)
+
+ // Confirm that already spent outputs can't be reused to fund another
+ // channel.
+ tc := &chanFundUtxoSelectionTestCase{
+ name: "output already spent",
+ initialCoins: []btcutil.Amount{
+ 200_000,
+ },
+ selectedCoins: []btcutil.Amount{200_000},
+ reuseUtxo: true,
+ }
+
+ runUtxoSelectionTestCase(ht, alice, bob, tc, reserveAmount)
+}
+
// runUtxoSelectionTestCase runs a single test case asserting that test
// conditions are met.
func runUtxoSelectionTestCase(ht *lntest.HarnessTest, alice,
bob *node.HarnessNode, tc *chanFundUtxoSelectionTestCase,
reserveAmount btcutil.Amount) {
- // fund initial coins
+ // Fund initial coins.
for _, initialCoin := range tc.initialCoins {
ht.FundCoins(initialCoin, alice)
}
- defer func() {
- // Fund additional coins to sweep in case the wallet contains
- // dust.
- ht.FundCoins(100_000, alice)
-
- // Remove all funds from Alice.
- sweepNodeWalletAndAssert(ht, alice)
- }()
// Create an outpoint lookup for each unique amount.
lookup := make(map[int64]*lnrpc.OutPoint)
@@ -317,9 +476,14 @@ func runUtxoSelectionTestCase(ht *lntest.HarnessTest, alice,
// successful, simply check for an error.
if tc.chanOpenShouldFail {
expectedErr := errors.New(tc.expectedErrStr)
- ht.OpenChannelAssertErr(
- alice, bob, chanParams, expectedErr,
- )
+ ht.OpenChannelAssertErr(alice, bob, chanParams, expectedErr)
+
+ // Fund additional coins to sweep in case the wallet contains
+ // dust.
+ ht.FundCoins(100_000, alice)
+
+ // Remove all funds from Alice.
+ sweepNodeWalletAndAssert(ht, alice)
return
}
diff --git a/itest/lnd_channel_graph_test.go b/itest/lnd_channel_graph_test.go
index 54ddd2ca21..b21da219ab 100644
--- a/itest/lnd_channel_graph_test.go
+++ b/itest/lnd_channel_graph_test.go
@@ -218,7 +218,9 @@ func testUpdateChanStatus(ht *lntest.HarnessTest) {
// describeGraph RPC request unless explicitly asked for.
func testUnannouncedChannels(ht *lntest.HarnessTest) {
amount := funding.MaxBtcFundingAmount
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
+ ht.EnsureConnected(alice, bob)
// Open a channel between Alice and Bob, ensuring the
// channel has been opened properly.
@@ -232,23 +234,20 @@ func testUnannouncedChannels(ht *lntest.HarnessTest) {
// One block is enough to make the channel ready for use, since the
// nodes have defaultNumConfs=1 set.
- fundingChanPoint := ht.WaitForChannelOpenEvent(chanOpenUpdate)
+ ht.WaitForChannelOpenEvent(chanOpenUpdate)
// Alice should have 1 edge in her graph.
- ht.AssertNumActiveEdges(alice, 1, true)
+ ht.AssertNumEdges(alice, 1, true)
// Channels should not be announced yet, hence Alice should have no
// announced edges in her graph.
- ht.AssertNumActiveEdges(alice, 0, false)
+ ht.AssertNumEdges(alice, 0, false)
// Mine 4 more blocks, and check that the channel is now announced.
ht.MineBlocks(4)
// Give the network a chance to learn that auth proof is confirmed.
- ht.AssertNumActiveEdges(alice, 1, false)
-
- // Close the channel used during the test.
- ht.CloseChannel(alice, fundingChanPoint)
+ ht.AssertNumEdges(alice, 1, false)
}
func testGraphTopologyNotifications(ht *lntest.HarnessTest) {
@@ -267,14 +266,10 @@ func testGraphTopologyNtfns(ht *lntest.HarnessTest, pinned bool) {
// Spin up Bob first, since we will need to grab his pubkey when
// starting Alice to test pinned syncing.
- bob := ht.Bob
+ bob := ht.NewNodeWithCoins("Bob", nil)
bobInfo := bob.RPC.GetInfo()
bobPubkey := bobInfo.IdentityPubkey
- // Restart Bob as he may have leftover announcements from previous
- // tests, causing the graph to be unsynced.
- ht.RestartNodeWithExtraArgs(bob, nil)
-
// For unpinned syncing, start Alice as usual. Otherwise grab Bob's
// pubkey to include in his pinned syncer set.
var aliceArgs []string
@@ -285,8 +280,7 @@ func testGraphTopologyNtfns(ht *lntest.HarnessTest, pinned bool) {
}
}
- alice := ht.Alice
- ht.RestartNodeWithExtraArgs(alice, aliceArgs)
+ alice := ht.NewNodeWithCoins("Alice", aliceArgs)
// Connect Alice and Bob.
ht.EnsureConnected(alice, bob)
@@ -370,16 +364,15 @@ func testGraphTopologyNtfns(ht *lntest.HarnessTest, pinned bool) {
// Bob's new node announcement, and the channel between Bob and Carol.
ht.AssertNumChannelUpdates(alice, chanPoint, 2)
ht.AssertNumNodeAnns(alice, bob.PubKeyStr, 1)
-
- // Close the channel between Bob and Carol.
- ht.CloseChannel(bob, chanPoint)
}
// testNodeAnnouncement ensures that when a node is started with one or more
// external IP addresses specified on the command line, that those addresses
// announced to the network and reported in the network graph.
func testNodeAnnouncement(ht *lntest.HarnessTest) {
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNode("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
+ ht.EnsureConnected(alice, bob)
advertisedAddrs := []string{
"192.168.1.1:8333",
@@ -403,7 +396,7 @@ func testNodeAnnouncement(ht *lntest.HarnessTest) {
// We'll then go ahead and open a channel between Bob and Dave. This
// ensures that Alice receives the node announcement from Bob as part of
// the announcement broadcast.
- chanPoint := ht.OpenChannel(
+ ht.OpenChannel(
bob, dave, lntest.OpenChannelParams{Amt: 1000000},
)
@@ -425,16 +418,15 @@ func testNodeAnnouncement(ht *lntest.HarnessTest) {
allUpdates := ht.AssertNumNodeAnns(alice, dave.PubKeyStr, 1)
nodeUpdate := allUpdates[len(allUpdates)-1]
assertAddrs(nodeUpdate.Addresses, advertisedAddrs...)
-
- // Close the channel between Bob and Dave.
- ht.CloseChannel(bob, chanPoint)
}
// testUpdateNodeAnnouncement ensures that the RPC endpoint validates
// the requests correctly and that the new node announcement is brodcasted
// with the right information after updating our node.
func testUpdateNodeAnnouncement(ht *lntest.HarnessTest) {
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNode("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
+ ht.EnsureConnected(alice, bob)
var lndArgs []string
@@ -530,7 +522,7 @@ func testUpdateNodeAnnouncement(ht *lntest.HarnessTest) {
// Go ahead and open a channel between Bob and Dave. This
// ensures that Alice receives the node announcement from Bob as part of
// the announcement broadcast.
- chanPoint := ht.OpenChannel(
+ ht.OpenChannel(
bob, dave, lntest.OpenChannelParams{
Amt: 1000000,
},
@@ -660,9 +652,6 @@ func testUpdateNodeAnnouncement(ht *lntest.HarnessTest) {
FeatureUpdates: updateFeatureActions,
}
dave.RPC.UpdateNodeAnnouncementErr(nodeAnnReq)
-
- // Close the channel between Bob and Dave.
- ht.CloseChannel(bob, chanPoint)
}
// assertSyncType asserts that the peer has an expected syncType.
diff --git a/itest/lnd_channel_policy_test.go b/itest/lnd_channel_policy_test.go
index bb05209753..da9a2ac16d 100644
--- a/itest/lnd_channel_policy_test.go
+++ b/itest/lnd_channel_policy_test.go
@@ -30,19 +30,16 @@ func testUpdateChannelPolicy(ht *lntest.HarnessTest) {
chanAmt := funding.MaxBtcFundingAmount
pushAmt := chanAmt / 2
- alice, bob := ht.Alice, ht.Bob
-
// Create a channel Alice->Bob.
- chanPoint := ht.OpenChannel(
- alice, bob, lntest.OpenChannelParams{
+ chanPoints, nodes := ht.CreateSimpleNetwork(
+ [][]string{nil, nil}, lntest.OpenChannelParams{
Amt: chanAmt,
PushAmt: pushAmt,
},
)
- // We add all the nodes' update channels to a slice, such that we can
- // make sure they all receive the expected updates.
- nodes := []*node.HarnessNode{alice, bob}
+ alice, bob := nodes[0], nodes[1]
+ chanPoint := chanPoints[0]
// Alice and Bob should see each other's ChannelUpdates, advertising the
// default routing policies. We do not currently set any inbound fees.
@@ -422,11 +419,6 @@ func testUpdateChannelPolicy(ht *lntest.HarnessTest) {
ht.AssertChannelPolicy(
carol, alice.PubKeyStr, expectedPolicy, chanPoint3,
)
-
- // Close all channels.
- ht.CloseChannel(alice, chanPoint)
- ht.CloseChannel(bob, chanPoint2)
- ht.CloseChannel(alice, chanPoint3)
}
// testSendUpdateDisableChannel ensures that a channel update with the disable
@@ -440,7 +432,8 @@ func testUpdateChannelPolicy(ht *lntest.HarnessTest) {
func testSendUpdateDisableChannel(ht *lntest.HarnessTest) {
const chanAmt = 100000
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
// Create a new node Eve, which will be restarted later with a config
// that has an inactive channel timeout of just 6 seconds (down from
@@ -677,7 +670,9 @@ func testUpdateChannelPolicyForPrivateChannel(ht *lntest.HarnessTest) {
// We'll create the following topology first,
// Alice <--public:100k--> Bob <--private:100k--> Carol
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
+ ht.EnsureConnected(alice, bob)
// Open a channel with 100k satoshis between Alice and Bob.
chanPointAliceBob := ht.OpenChannel(
@@ -772,10 +767,6 @@ func testUpdateChannelPolicyForPrivateChannel(ht *lntest.HarnessTest) {
// Alice should have sent 20k satoshis + fee to Bob.
ht.AssertAmountPaid("Alice(local) => Bob(remote)",
alice, chanPointAliceBob, amtExpected, 0)
-
- // Finally, close the channels.
- ht.CloseChannel(alice, chanPointAliceBob)
- ht.CloseChannel(bob, chanPointBobCarol)
}
// testUpdateChannelPolicyFeeRateAccuracy tests that updating the channel policy
@@ -786,16 +777,14 @@ func testUpdateChannelPolicyFeeRateAccuracy(ht *lntest.HarnessTest) {
pushAmt := chanAmt / 2
// Create a channel Alice -> Bob.
- alice, bob := ht.Alice, ht.Bob
- chanPoint := ht.OpenChannel(
- alice, bob, lntest.OpenChannelParams{
+ chanPoints, nodes := ht.CreateSimpleNetwork(
+ [][]string{nil, nil}, lntest.OpenChannelParams{
Amt: chanAmt,
PushAmt: pushAmt,
},
)
-
- // Nodes that we need to make sure receive the channel updates.
- nodes := []*node.HarnessNode{alice, bob}
+ alice := nodes[0]
+ chanPoint := chanPoints[0]
baseFee := int64(1500)
timeLockDelta := uint32(66)
@@ -846,8 +835,6 @@ func testUpdateChannelPolicyFeeRateAccuracy(ht *lntest.HarnessTest) {
// Make sure that both Alice and Bob sees the same policy after update.
assertNodesPolicyUpdate(ht, nodes, alice, expectedPolicy, chanPoint)
-
- ht.CloseChannel(alice, chanPoint)
}
// assertNodesPolicyUpdate checks that a given policy update has been received
diff --git a/itest/lnd_coop_close_external_delivery_test.go b/itest/lnd_coop_close_external_delivery_test.go
index 57c2da2f4f..8341a2f650 100644
--- a/itest/lnd_coop_close_external_delivery_test.go
+++ b/itest/lnd_coop_close_external_delivery_test.go
@@ -58,7 +58,8 @@ func testCoopCloseWithExternalDelivery(ht *lntest.HarnessTest) {
func testCoopCloseWithExternalDeliveryImpl(ht *lntest.HarnessTest,
upfrontShutdown bool, deliveryAddressType lnrpc.AddressType) {
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("bob", nil)
ht.ConnectNodes(alice, bob)
// Here we generate a final delivery address in bob's wallet but set by
diff --git a/itest/lnd_coop_close_with_htlcs_test.go b/itest/lnd_coop_close_with_htlcs_test.go
index 2b7bcad729..b5d5f516b9 100644
--- a/itest/lnd_coop_close_with_htlcs_test.go
+++ b/itest/lnd_coop_close_with_htlcs_test.go
@@ -37,7 +37,8 @@ func testCoopCloseWithHtlcs(ht *lntest.HarnessTest) {
// channel party initiates a channel shutdown while an HTLC is still pending on
// the channel.
func coopCloseWithHTLCs(ht *lntest.HarnessTest) {
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("bob", nil)
ht.ConnectNodes(alice, bob)
// Here we set up a channel between Alice and Bob, beginning with a
@@ -128,7 +129,8 @@ func coopCloseWithHTLCs(ht *lntest.HarnessTest) {
// process continues as expected even if a channel re-establish happens after
// one party has already initiated the shutdown.
func coopCloseWithHTLCsWithRestart(ht *lntest.HarnessTest) {
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("bob", nil)
ht.ConnectNodes(alice, bob)
// Open a channel between Alice and Bob with the balance split equally.
diff --git a/itest/lnd_custom_features.go b/itest/lnd_custom_features.go
index 4867ca2d11..701a86e79b 100644
--- a/itest/lnd_custom_features.go
+++ b/itest/lnd_custom_features.go
@@ -27,20 +27,18 @@ func testCustomFeatures(ht *lntest.HarnessTest) {
fmt.Sprintf("--protocol.custom-nodeann=%v", customNodeAnn),
fmt.Sprintf("--protocol.custom-invoice=%v", customInvoice),
}
- ht.RestartNodeWithExtraArgs(ht.Alice, extraArgs)
+ cfgs := [][]string{extraArgs, nil}
- // Connect nodes and open a channel so that Alice will be included
- // in Bob's graph.
- ht.ConnectNodes(ht.Alice, ht.Bob)
- chanPoint := ht.OpenChannel(
- ht.Alice, ht.Bob, lntest.OpenChannelParams{Amt: 1000000},
+ _, nodes := ht.CreateSimpleNetwork(
+ cfgs, lntest.OpenChannelParams{Amt: 1000000},
)
+ alice, bob := nodes[0], nodes[1]
// Check that Alice's custom feature bit was sent to Bob in her init
// message.
- peers := ht.Bob.RPC.ListPeers()
+ peers := bob.RPC.ListPeers()
require.Len(ht, peers.Peers, 1)
- require.Equal(ht, peers.Peers[0].PubKey, ht.Alice.PubKeyStr)
+ require.Equal(ht, peers.Peers[0].PubKey, alice.PubKeyStr)
_, customInitSet := peers.Peers[0].Features[customInit]
require.True(ht, customInitSet)
@@ -51,7 +49,7 @@ func testCustomFeatures(ht *lntest.HarnessTest) {
// Assert that Alice's custom feature bit is contained in the node
// announcement sent to Bob.
- updates := ht.AssertNumNodeAnns(ht.Bob, ht.Alice.PubKeyStr, 1)
+ updates := ht.AssertNumNodeAnns(bob, alice.PubKeyStr, 1)
features := updates[len(updates)-1].Features
_, customFeature := features[customNodeAnn]
require.True(ht, customFeature)
@@ -60,8 +58,8 @@ func testCustomFeatures(ht *lntest.HarnessTest) {
)
// Assert that Alice's custom feature bit is included in invoices.
- invoice := ht.Alice.RPC.AddInvoice(&lnrpc.Invoice{})
- payReq := ht.Alice.RPC.DecodePayReq(invoice.PaymentRequest)
+ invoice := alice.RPC.AddInvoice(&lnrpc.Invoice{})
+ payReq := alice.RPC.DecodePayReq(invoice.PaymentRequest)
_, customInvoiceSet := payReq.Features[customInvoice]
require.True(ht, customInvoiceSet)
assertFeatureNotInSet(
@@ -79,9 +77,7 @@ func testCustomFeatures(ht *lntest.HarnessTest) {
},
},
}
- ht.Alice.RPC.UpdateNodeAnnouncementErr(nodeAnnReq)
-
- ht.CloseChannel(ht.Alice, chanPoint)
+ alice.RPC.UpdateNodeAnnouncementErr(nodeAnnReq)
}
// assertFeatureNotInSet checks that the features provided aren't contained in
diff --git a/itest/lnd_custom_message_test.go b/itest/lnd_custom_message_test.go
index 14a4e79493..93a92a2d66 100644
--- a/itest/lnd_custom_message_test.go
+++ b/itest/lnd_custom_message_test.go
@@ -14,8 +14,6 @@ import (
// types (within the message type range usually reserved for protocol messages)
// via the send and subscribe custom message APIs.
func testCustomMessage(ht *lntest.HarnessTest) {
- alice, bob := ht.Alice, ht.Bob
-
var (
overrideType1 uint32 = 554
overrideType2 uint32 = 555
@@ -27,7 +25,8 @@ func testCustomMessage(ht *lntest.HarnessTest) {
extraArgs := []string{
fmt.Sprintf(msgOverrideArg, overrideType1),
}
- ht.RestartNodeWithExtraArgs(alice, extraArgs)
+ alice := ht.NewNode("Alice", extraArgs)
+ bob := ht.NewNode("Bob", nil)
// Subscribe Alice to custom messages before we send any, so that we
// don't miss any.
diff --git a/itest/lnd_estimate_route_fee_test.go b/itest/lnd_estimate_route_fee_test.go
index 8ed0be2725..e33aa6bb8f 100644
--- a/itest/lnd_estimate_route_fee_test.go
+++ b/itest/lnd_estimate_route_fee_test.go
@@ -93,23 +93,19 @@ func testEstimateRouteFee(ht *lntest.HarnessTest) {
// added to the invoice always have enough liquidity, but here we check
// that the prober uses the more expensive route.
ht.EnsureConnected(mts.bob, paula)
- channelPointBobPaula := ht.OpenChannel(
- mts.bob, paula, lntest.OpenChannelParams{
- Private: true,
- Amt: 90_000,
- PushAmt: 69_000,
- },
- )
+ ht.OpenChannel(mts.bob, paula, lntest.OpenChannelParams{
+ Private: true,
+ Amt: 90_000,
+ PushAmt: 69_000,
+ })
ht.EnsureConnected(mts.eve, paula)
- channelPointEvePaula := ht.OpenChannel(
- mts.eve, paula, lntest.OpenChannelParams{
- Private: true,
- Amt: 1_000_000,
- },
- )
+ ht.OpenChannel(mts.eve, paula, lntest.OpenChannelParams{
+ Private: true,
+ Amt: 1_000_000,
+ })
- bobsPrivChannels := ht.Bob.RPC.ListChannels(&lnrpc.ListChannelsRequest{
+ bobsPrivChannels := mts.bob.RPC.ListChannels(&lnrpc.ListChannelsRequest{
PrivateOnly: true,
})
require.Len(ht, bobsPrivChannels.Channels, 1)
@@ -242,6 +238,8 @@ func testEstimateRouteFee(ht *lntest.HarnessTest) {
locktime := initialBlockHeight + defaultTimelock +
int64(routing.BlockPadding)
+ noChanNode := ht.NewNode("ImWithoutChannels", nil)
+
var testCases = []*estimateRouteFeeTestCase{
// Single hop payment is free.
{
@@ -303,10 +301,8 @@ func testEstimateRouteFee(ht *lntest.HarnessTest) {
{
name: "single hop hint, destination " +
"without channels",
- probing: true,
- destination: ht.NewNode(
- "ImWithoutChannels", nil,
- ),
+ probing: true,
+ destination: noChanNode,
routeHints: singleRouteHint,
expectedRoutingFeesMsat: feeACBP,
expectedCltvDelta: locktime + deltaACBP,
@@ -356,12 +352,6 @@ func testEstimateRouteFee(ht *lntest.HarnessTest) {
break
}
}
-
- mts.ht.CloseChannelAssertPending(mts.bob, channelPointBobPaula, false)
- mts.ht.CloseChannelAssertPending(mts.eve, channelPointEvePaula, false)
- ht.MineBlocksAndAssertNumTxes(1, 2)
-
- mts.closeChannels()
}
// runTestCase runs a single test case asserting that test conditions are met.
@@ -376,7 +366,7 @@ func runFeeEstimationTestCase(ht *lntest.HarnessTest,
)
feeReq = &routerrpc.RouteFeeRequest{
PaymentRequest: payReqs[0],
- Timeout: 10,
+ Timeout: 60,
}
} else {
feeReq = &routerrpc.RouteFeeRequest{
diff --git a/itest/lnd_forward_interceptor_test.go b/itest/lnd_forward_interceptor_test.go
index 9bbecd31b3..37bf8fdeda 100644
--- a/itest/lnd_forward_interceptor_test.go
+++ b/itest/lnd_forward_interceptor_test.go
@@ -42,23 +42,24 @@ type interceptorTestCase struct {
// testForwardInterceptorDedupHtlc tests that upon reconnection, duplicate
// HTLCs aren't re-notified using the HTLC interceptor API.
func testForwardInterceptorDedupHtlc(ht *lntest.HarnessTest) {
- // Initialize the test context with 3 connected nodes.
- ts := newInterceptorTestScenario(ht)
+ const chanAmt = btcutil.Amount(300000)
+ p := lntest.OpenChannelParams{Amt: chanAmt}
- alice, bob, carol := ts.alice, ts.bob, ts.carol
+ // Initialize the test context with 3 connected nodes.
+ cfgs := [][]string{nil, nil, nil}
// Open and wait for channels.
- const chanAmt = btcutil.Amount(300000)
- p := lntest.OpenChannelParams{Amt: chanAmt}
- reqs := []*lntest.OpenChannelRequest{
- {Local: alice, Remote: bob, Param: p},
- {Local: bob, Remote: carol, Param: p},
- }
- resp := ht.OpenMultiChannelsAsync(reqs)
- cpAB, cpBC := resp[0], resp[1]
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, p)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
+ cpAB := chanPoints[0]
- // Make sure Alice is aware of channel Bob=>Carol.
- ht.AssertChannelInGraph(alice, cpBC)
+ // Init the scenario.
+ ts := &interceptorTestScenario{
+ ht: ht,
+ alice: alice,
+ bob: bob,
+ carol: carol,
+ }
// Connect the interceptor.
interceptor, cancelInterceptor := bob.RPC.HtlcInterceptor()
@@ -177,10 +178,6 @@ func testForwardInterceptorDedupHtlc(ht *lntest.HarnessTest) {
case <-time.After(defaultTimeout):
require.Fail(ht, "timeout waiting for interceptor error")
}
-
- // Finally, close channels.
- ht.CloseChannel(alice, cpAB)
- ht.CloseChannel(bob, cpBC)
}
// testForwardInterceptorBasic tests the forward interceptor RPC layer.
@@ -194,22 +191,24 @@ func testForwardInterceptorDedupHtlc(ht *lntest.HarnessTest) {
// 4. When Interceptor disconnects it resumes all held htlcs, which result in
// valid payment (invoice is settled).
func testForwardInterceptorBasic(ht *lntest.HarnessTest) {
- ts := newInterceptorTestScenario(ht)
+ const chanAmt = btcutil.Amount(300000)
+ p := lntest.OpenChannelParams{Amt: chanAmt}
- alice, bob, carol := ts.alice, ts.bob, ts.carol
+ // Initialize the test context with 3 connected nodes.
+ cfgs := [][]string{nil, nil, nil}
// Open and wait for channels.
- const chanAmt = btcutil.Amount(300000)
- p := lntest.OpenChannelParams{Amt: chanAmt}
- reqs := []*lntest.OpenChannelRequest{
- {Local: alice, Remote: bob, Param: p},
- {Local: bob, Remote: carol, Param: p},
- }
- resp := ht.OpenMultiChannelsAsync(reqs)
- cpAB, cpBC := resp[0], resp[1]
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, p)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
+ cpAB := chanPoints[0]
- // Make sure Alice is aware of channel Bob=>Carol.
- ht.AssertChannelInGraph(alice, cpBC)
+ // Init the scenario.
+ ts := &interceptorTestScenario{
+ ht: ht,
+ alice: alice,
+ bob: bob,
+ carol: carol,
+ }
// Connect the interceptor.
interceptor, cancelInterceptor := bob.RPC.HtlcInterceptor()
@@ -345,32 +344,28 @@ func testForwardInterceptorBasic(ht *lntest.HarnessTest) {
case <-time.After(defaultTimeout):
require.Fail(ht, "timeout waiting for interceptor error")
}
-
- // Finally, close channels.
- ht.CloseChannel(alice, cpAB)
- ht.CloseChannel(bob, cpBC)
}
// testForwardInterceptorModifiedHtlc tests that the interceptor can modify the
// amount and custom records of an intercepted HTLC and resume it.
func testForwardInterceptorModifiedHtlc(ht *lntest.HarnessTest) {
- // Initialize the test context with 3 connected nodes.
- ts := newInterceptorTestScenario(ht)
+ const chanAmt = btcutil.Amount(300000)
+ p := lntest.OpenChannelParams{Amt: chanAmt}
- alice, bob, carol := ts.alice, ts.bob, ts.carol
+ // Initialize the test context with 3 connected nodes.
+ cfgs := [][]string{nil, nil, nil}
// Open and wait for channels.
- const chanAmt = btcutil.Amount(300000)
- p := lntest.OpenChannelParams{Amt: chanAmt}
- reqs := []*lntest.OpenChannelRequest{
- {Local: alice, Remote: bob, Param: p},
- {Local: bob, Remote: carol, Param: p},
- }
- resp := ht.OpenMultiChannelsAsync(reqs)
- cpAB, cpBC := resp[0], resp[1]
+ _, nodes := ht.CreateSimpleNetwork(cfgs, p)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
- // Make sure Alice is aware of channel Bob=>Carol.
- ht.AssertChannelInGraph(alice, cpBC)
+ // Init the scenario.
+ ts := &interceptorTestScenario{
+ ht: ht,
+ alice: alice,
+ bob: bob,
+ carol: carol,
+ }
// Connect an interceptor to Bob's node.
bobInterceptor, cancelBobInterceptor := bob.RPC.HtlcInterceptor()
@@ -451,34 +446,21 @@ func testForwardInterceptorModifiedHtlc(ht *lntest.HarnessTest) {
var preimage lntypes.Preimage
copy(preimage[:], invoice.RPreimage)
ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
-
- // Finally, close channels.
- ht.CloseChannel(alice, cpAB)
- ht.CloseChannel(bob, cpBC)
}
// testForwardInterceptorWireRecords tests that the interceptor can read any
// wire custom records provided by the sender of a payment as part of the
// update_add_htlc message.
func testForwardInterceptorWireRecords(ht *lntest.HarnessTest) {
- // Initialize the test context with 3 connected nodes.
- ts := newInterceptorTestScenario(ht)
-
- alice, bob, carol, dave := ts.alice, ts.bob, ts.carol, ts.dave
-
- // Open and wait for channels.
const chanAmt = btcutil.Amount(300000)
p := lntest.OpenChannelParams{Amt: chanAmt}
- reqs := []*lntest.OpenChannelRequest{
- {Local: alice, Remote: bob, Param: p},
- {Local: bob, Remote: carol, Param: p},
- {Local: carol, Remote: dave, Param: p},
- }
- resp := ht.OpenMultiChannelsAsync(reqs)
- cpAB, cpBC, cpCD := resp[0], resp[1], resp[2]
- // Make sure Alice is aware of channel Bob=>Carol.
- ht.AssertChannelInGraph(alice, cpBC)
+ // Initialize the test context with 4 connected nodes.
+ cfgs := [][]string{nil, nil, nil, nil}
+
+ // Open and wait for channels.
+ _, nodes := ht.CreateSimpleNetwork(cfgs, p)
+ alice, bob, carol, dave := nodes[0], nodes[1], nodes[2], nodes[3]
// Connect an interceptor to Bob's node.
bobInterceptor, cancelBobInterceptor := bob.RPC.HtlcInterceptor()
@@ -508,8 +490,7 @@ func testForwardInterceptorWireRecords(ht *lntest.HarnessTest) {
FeeLimitMsat: noFeeLimitMsat,
FirstHopCustomRecords: customRecords,
}
-
- _ = alice.RPC.SendPayment(sendReq)
+ ht.SendPaymentAssertInflight(alice, sendReq)
// We start the htlc interceptor with a simple implementation that saves
// all intercepted packets. These packets are held to simulate a
@@ -580,11 +561,6 @@ func testForwardInterceptorWireRecords(ht *lntest.HarnessTest) {
return nil
},
)
-
- // Finally, close channels.
- ht.CloseChannel(alice, cpAB)
- ht.CloseChannel(bob, cpBC)
- ht.CloseChannel(carol, cpCD)
}
// testForwardInterceptorRestart tests that the interceptor can read any wire
@@ -592,25 +568,15 @@ func testForwardInterceptorWireRecords(ht *lntest.HarnessTest) {
// update_add_htlc message and that those records are persisted correctly and
// re-sent on node restart.
func testForwardInterceptorRestart(ht *lntest.HarnessTest) {
- // Initialize the test context with 3 connected nodes.
- ts := newInterceptorTestScenario(ht)
-
- alice, bob, carol, dave := ts.alice, ts.bob, ts.carol, ts.dave
-
- // Open and wait for channels.
const chanAmt = btcutil.Amount(300000)
p := lntest.OpenChannelParams{Amt: chanAmt}
- reqs := []*lntest.OpenChannelRequest{
- {Local: alice, Remote: bob, Param: p},
- {Local: bob, Remote: carol, Param: p},
- {Local: carol, Remote: dave, Param: p},
- }
- resp := ht.OpenMultiChannelsAsync(reqs)
- cpAB, cpBC, cpCD := resp[0], resp[1], resp[2]
- // Make sure Alice is aware of channels Bob=>Carol and Carol=>Dave.
- ht.AssertChannelInGraph(alice, cpBC)
- ht.AssertChannelInGraph(alice, cpCD)
+ // Initialize the test context with 4 connected nodes.
+ cfgs := [][]string{nil, nil, nil, nil}
+
+ // Open and wait for channels.
+ _, nodes := ht.CreateSimpleNetwork(cfgs, p)
+ alice, bob, carol, dave := nodes[0], nodes[1], nodes[2], nodes[3]
// Connect an interceptor to Bob's node.
bobInterceptor, cancelBobInterceptor := bob.RPC.HtlcInterceptor()
@@ -635,8 +601,7 @@ func testForwardInterceptorRestart(ht *lntest.HarnessTest) {
FeeLimitMsat: noFeeLimitMsat,
FirstHopCustomRecords: customRecords,
}
-
- _ = alice.RPC.SendPayment(sendReq)
+ ht.SendPaymentAssertInflight(alice, sendReq)
// We start the htlc interceptor with a simple implementation that saves
// all intercepted packets. These packets are held to simulate a
@@ -741,11 +706,6 @@ func testForwardInterceptorRestart(ht *lntest.HarnessTest) {
return nil
},
)
-
- // Finally, close channels.
- ht.CloseChannel(alice, cpAB)
- ht.CloseChannel(bob, cpBC)
- ht.CloseChannel(carol, cpCD)
}
// interceptorTestScenario is a helper struct to hold the test context and
@@ -755,35 +715,6 @@ type interceptorTestScenario struct {
alice, bob, carol, dave *node.HarnessNode
}
-// newInterceptorTestScenario initializes a new test scenario with three nodes
-// and connects them to have the following topology,
-//
-// Alice --> Bob --> Carol --> Dave
-//
-// Among them, Alice and Bob are standby nodes and Carol is a new node.
-func newInterceptorTestScenario(
- ht *lntest.HarnessTest) *interceptorTestScenario {
-
- alice, bob := ht.Alice, ht.Bob
- carol := ht.NewNode("carol", nil)
- dave := ht.NewNode("dave", nil)
-
- ht.EnsureConnected(alice, bob)
- ht.EnsureConnected(bob, carol)
- ht.EnsureConnected(carol, dave)
-
- // So that carol can open channels.
- ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
-
- return &interceptorTestScenario{
- ht: ht,
- alice: alice,
- bob: bob,
- carol: carol,
- dave: dave,
- }
-}
-
// prepareTestCases prepares 4 tests:
// 1. failed htlc.
// 2. resumed htlc.
diff --git a/itest/lnd_funding_test.go b/itest/lnd_funding_test.go
index 7daf95960d..1aefc360ce 100644
--- a/itest/lnd_funding_test.go
+++ b/itest/lnd_funding_test.go
@@ -25,166 +25,230 @@ import (
"github.com/stretchr/testify/require"
)
-// testBasicChannelFunding performs a test exercising expected behavior from a
-// basic funding workflow. The test creates a new channel between Alice and
-// Bob, then immediately closes the channel after asserting some expected post
-// conditions. Finally, the chain itself is checked to ensure the closing
-// transaction was mined.
-func testBasicChannelFunding(ht *lntest.HarnessTest) {
- // Run through the test with combinations of all the different
- // commitment types.
- allTypes := []lnrpc.CommitmentType{
- lnrpc.CommitmentType_STATIC_REMOTE_KEY,
- lnrpc.CommitmentType_ANCHORS,
- lnrpc.CommitmentType_SIMPLE_TAPROOT,
- }
+// basicFundingTestCases defines the test cases for the basic funding test.
+var basicFundingTestCases = []*lntest.TestCase{
+ {
+ Name: "basic funding flow static key remote",
+ TestFunc: testBasicChannelFundingStaticRemote,
+ },
+ {
+ Name: "basic funding flow anchor",
+ TestFunc: testBasicChannelFundingAnchor,
+ },
+ {
+ Name: "basic funding flow simple taproot",
+ TestFunc: testBasicChannelFundingSimpleTaproot,
+ },
+}
+
+// allFundingTypes defines the channel types to test for the basic funding
+// test.
+var allFundingTypes = []lnrpc.CommitmentType{
+ lnrpc.CommitmentType_STATIC_REMOTE_KEY,
+ lnrpc.CommitmentType_ANCHORS,
+ lnrpc.CommitmentType_SIMPLE_TAPROOT,
+}
+
+// testBasicChannelFundingStaticRemote performs a test exercising expected
+// behavior from a basic funding workflow. The test creates a new channel
+// between Carol and Dave, with Carol using the static remote key commitment
+// type, and Dave using allFundingTypes.
+func testBasicChannelFundingStaticRemote(ht *lntest.HarnessTest) {
+ carolCommitType := lnrpc.CommitmentType_STATIC_REMOTE_KEY
- // testFunding is a function closure that takes Carol and Dave's
- // commitment types and test the funding flow.
- testFunding := func(ht *lntest.HarnessTest, carolCommitType,
- daveCommitType lnrpc.CommitmentType) {
+ // We'll test all possible combinations of the feature bit presence
+ // that both nodes can signal for this new channel type. We'll make a
+ // new Carol+Dave for each test instance as well.
+ for _, daveCommitType := range allFundingTypes {
+ cc := carolCommitType
+ dc := daveCommitType
+
+ testName := fmt.Sprintf(
+ "carol_commit=%v,dave_commit=%v", cc, dc,
+ )
- // Based on the current tweak variable for Carol, we'll
- // preferentially signal the legacy commitment format. We do
- // the same for Dave shortly below.
- carolArgs := lntest.NodeArgsForCommitType(carolCommitType)
- carol := ht.NewNode("Carol", carolArgs)
+ success := ht.Run(testName, func(t *testing.T) {
+ st := ht.Subtest(t)
+ runBasicFundingTest(st, cc, dc)
+ })
- // Each time, we'll send Carol a new set of coins in order to
- // fund the channel.
- ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
+ if !success {
+ break
+ }
+ }
+}
- daveArgs := lntest.NodeArgsForCommitType(daveCommitType)
- dave := ht.NewNode("Dave", daveArgs)
+// testBasicChannelFundingAnchor performs a test exercising expected behavior
+// from a basic funding workflow. The test creates a new channel between Carol
+// and Dave, with Carol using the anchor commitment type, and Dave using
+// allFundingTypes.
+func testBasicChannelFundingAnchor(ht *lntest.HarnessTest) {
+ carolCommitType := lnrpc.CommitmentType_ANCHORS
- // Before we start the test, we'll ensure both sides are
- // connected to the funding flow can properly be executed.
- ht.EnsureConnected(carol, dave)
+ // We'll test all possible combinations of the feature bit presence
+ // that both nodes can signal for this new channel type. We'll make a
+ // new Carol+Dave for each test instance as well.
+ for _, daveCommitType := range allFundingTypes {
+ cc := carolCommitType
+ dc := daveCommitType
- var privateChan bool
+ testName := fmt.Sprintf(
+ "carol_commit=%v,dave_commit=%v", cc, dc,
+ )
- // If this is to be a taproot channel type, then it needs to be
- // private, otherwise it'll be rejected by Dave.
- //
- // TODO(roasbeef): lift after gossip 1.75
- if carolCommitType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- privateChan = true
+ success := ht.Run(testName, func(t *testing.T) {
+ st := ht.Subtest(t)
+ runBasicFundingTest(st, cc, dc)
+ })
+
+ if !success {
+ break
}
+ }
+}
+
+// testBasicChannelFundingSimpleTaproot performs a test exercising expected
+// behavior from a basic funding workflow. The test creates a new channel
+// between Carol and Dave, with Carol using the simple taproot commitment type,
+// and Dave using allFundingTypes.
+func testBasicChannelFundingSimpleTaproot(ht *lntest.HarnessTest) {
+ carolCommitType := lnrpc.CommitmentType_SIMPLE_TAPROOT
+
+ // We'll test all possible combinations of the feature bit presence
+ // that both nodes can signal for this new channel type. We'll make a
+ // new Carol+Dave for each test instance as well.
+ for _, daveCommitType := range allFundingTypes {
+ cc := carolCommitType
+ dc := daveCommitType
+
+ testName := fmt.Sprintf(
+ "carol_commit=%v,dave_commit=%v", cc, dc,
+ )
- // If carol wants taproot, but dave wants something
- // else, then we'll assert that the channel negotiation
- // attempt fails.
- if carolCommitType == lnrpc.CommitmentType_SIMPLE_TAPROOT &&
- daveCommitType != lnrpc.CommitmentType_SIMPLE_TAPROOT {
-
- expectedErr := fmt.Errorf("requested channel type " +
- "not supported")
- amt := funding.MaxBtcFundingAmount
- ht.OpenChannelAssertErr(
- carol, dave, lntest.OpenChannelParams{
- Private: privateChan,
- Amt: amt,
- CommitmentType: carolCommitType,
- }, expectedErr,
- )
-
- return
+ success := ht.Run(testName, func(t *testing.T) {
+ st := ht.Subtest(t)
+ runBasicFundingTest(st, cc, dc)
+ })
+
+ if !success {
+ break
}
+ }
+}
+
+// runBasicFundingTest is a helper function that takes Carol and Dave's
+// commitment types and test the funding flow.
+func runBasicFundingTest(ht *lntest.HarnessTest, carolCommitType,
+ daveCommitType lnrpc.CommitmentType) {
+
+ // Based on the current tweak variable for Carol, we'll preferentially
+ // signal the legacy commitment format. We do the same for Dave
+ // shortly below.
+ carolArgs := lntest.NodeArgsForCommitType(carolCommitType)
+ carol := ht.NewNode("Carol", carolArgs)
+
+ // Each time, we'll send Carol a new set of coins in order to fund the
+ // channel.
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
+
+ daveArgs := lntest.NodeArgsForCommitType(daveCommitType)
+ dave := ht.NewNode("Dave", daveArgs)
- carolChan, daveChan, closeChan := basicChannelFundingTest(
- ht, carol, dave, nil, privateChan, &carolCommitType,
+ // Before we start the test, we'll ensure both sides are connected to
+ // the funding flow can properly be executed.
+ ht.EnsureConnected(carol, dave)
+
+ var privateChan bool
+
+ // If this is to be a taproot channel type, then it needs to be
+ // private, otherwise it'll be rejected by Dave.
+ //
+ // TODO(roasbeef): lift after gossip 1.75
+ if carolCommitType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
+ privateChan = true
+ }
+
+ // If carol wants taproot, but dave wants something else, then we'll
+ // assert that the channel negotiation attempt fails.
+ if carolCommitType == lnrpc.CommitmentType_SIMPLE_TAPROOT &&
+ daveCommitType != lnrpc.CommitmentType_SIMPLE_TAPROOT {
+
+ expectedErr := fmt.Errorf("requested channel type " +
+ "not supported")
+ amt := funding.MaxBtcFundingAmount
+ ht.OpenChannelAssertErr(
+ carol, dave, lntest.OpenChannelParams{
+ Private: privateChan,
+ Amt: amt,
+ CommitmentType: carolCommitType,
+ }, expectedErr,
)
- // Both nodes should report the same commitment
- // type.
- chansCommitType := carolChan.CommitmentType
- require.Equal(ht, chansCommitType, daveChan.CommitmentType,
- "commit types don't match")
+ return
+ }
- // Now check that the commitment type reported by both nodes is
- // what we expect. It will be the minimum of the two nodes'
- // preference, in the order Legacy, Tweakless, Anchors.
- expType := carolCommitType
+ carolChan, daveChan := basicChannelFundingTest(
+ ht, carol, dave, nil, privateChan, &carolCommitType,
+ )
- switch daveCommitType {
- // Dave supports taproot, type will be what Carol supports.
- case lnrpc.CommitmentType_SIMPLE_TAPROOT:
+ // Both nodes should report the same commitment type.
+ chansCommitType := carolChan.CommitmentType
+ require.Equal(ht, chansCommitType, daveChan.CommitmentType,
+ "commit types don't match")
+
+ // Now check that the commitment type reported by both nodes is what we
+ // expect. It will be the minimum of the two nodes' preference, in the
+ // order Legacy, Tweakless, Anchors.
+ expType := carolCommitType
+
+ switch daveCommitType {
+ // Dave supports taproot, type will be what Carol supports.
+ case lnrpc.CommitmentType_SIMPLE_TAPROOT:
+
+ // Dave supports anchors, type will be what Carol supports.
+ case lnrpc.CommitmentType_ANCHORS:
+ // However if Alice wants taproot chans, then we downgrade to
+ // anchors as this is still using implicit negotiation.
+ if expType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
+ expType = lnrpc.CommitmentType_ANCHORS
+ }
- // Dave supports anchors, type will be what Carol supports.
+ // Dave only supports tweakless, channel will be downgraded to this
+ // type if Carol supports anchors.
+ case lnrpc.CommitmentType_STATIC_REMOTE_KEY:
+ switch expType {
case lnrpc.CommitmentType_ANCHORS:
- // However if Alice wants taproot chans, then we
- // downgrade to anchors as this is still using implicit
- // negotiation.
- if expType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- expType = lnrpc.CommitmentType_ANCHORS
- }
-
- // Dave only supports tweakless, channel will be downgraded to
- // this type if Carol supports anchors.
- case lnrpc.CommitmentType_STATIC_REMOTE_KEY:
- switch expType {
- case lnrpc.CommitmentType_ANCHORS:
- expType = lnrpc.CommitmentType_STATIC_REMOTE_KEY
- case lnrpc.CommitmentType_SIMPLE_TAPROOT:
- expType = lnrpc.CommitmentType_STATIC_REMOTE_KEY
- }
-
- // Dave only supports legacy type, channel will be downgraded
- // to this type.
- case lnrpc.CommitmentType_LEGACY:
- expType = lnrpc.CommitmentType_LEGACY
-
- default:
- ht.Fatalf("invalid commit type %v", daveCommitType)
+ expType = lnrpc.CommitmentType_STATIC_REMOTE_KEY
+ case lnrpc.CommitmentType_SIMPLE_TAPROOT:
+ expType = lnrpc.CommitmentType_STATIC_REMOTE_KEY
}
- // Check that the signalled type matches what we expect.
- switch {
- case expType == lnrpc.CommitmentType_ANCHORS &&
- chansCommitType == lnrpc.CommitmentType_ANCHORS:
+ // Dave only supports legacy type, channel will be downgraded to this
+ // type.
+ case lnrpc.CommitmentType_LEGACY:
+ expType = lnrpc.CommitmentType_LEGACY
- case expType == lnrpc.CommitmentType_STATIC_REMOTE_KEY &&
- chansCommitType == lnrpc.CommitmentType_STATIC_REMOTE_KEY: //nolint:lll
+ default:
+ ht.Fatalf("invalid commit type %v", daveCommitType)
+ }
- case expType == lnrpc.CommitmentType_LEGACY &&
- chansCommitType == lnrpc.CommitmentType_LEGACY:
+ // Check that the signalled type matches what we expect.
+ switch {
+ case expType == lnrpc.CommitmentType_ANCHORS &&
+ chansCommitType == lnrpc.CommitmentType_ANCHORS:
- case expType == lnrpc.CommitmentType_SIMPLE_TAPROOT &&
- chansCommitType == lnrpc.CommitmentType_SIMPLE_TAPROOT:
+ case expType == lnrpc.CommitmentType_STATIC_REMOTE_KEY &&
+ chansCommitType == lnrpc.CommitmentType_STATIC_REMOTE_KEY:
- default:
- ht.Fatalf("expected nodes to signal "+
- "commit type %v, instead got "+
- "%v", expType, chansCommitType)
- }
+ case expType == lnrpc.CommitmentType_LEGACY &&
+ chansCommitType == lnrpc.CommitmentType_LEGACY:
- // As we've concluded this sub-test case we'll now close out
- // the channel for both sides.
- closeChan()
- }
+ case expType == lnrpc.CommitmentType_SIMPLE_TAPROOT &&
+ chansCommitType == lnrpc.CommitmentType_SIMPLE_TAPROOT:
-test:
- // We'll test all possible combinations of the feature bit presence
- // that both nodes can signal for this new channel type. We'll make a
- // new Carol+Dave for each test instance as well.
- for _, carolCommitType := range allTypes {
- for _, daveCommitType := range allTypes {
- cc := carolCommitType
- dc := daveCommitType
-
- testName := fmt.Sprintf(
- "carol_commit=%v,dave_commit=%v", cc, dc,
- )
-
- success := ht.Run(testName, func(t *testing.T) {
- st := ht.Subtest(t)
- testFunding(st, cc, dc)
- })
-
- if !success {
- break test
- }
- }
+ default:
+ ht.Fatalf("expected nodes to signal commit type %v, instead "+
+ "got %v", expType, chansCommitType)
}
}
@@ -195,7 +259,7 @@ test:
func basicChannelFundingTest(ht *lntest.HarnessTest,
alice, bob *node.HarnessNode, fundingShim *lnrpc.FundingShim,
privateChan bool, commitType *lnrpc.CommitmentType) (*lnrpc.Channel,
- *lnrpc.Channel, func()) {
+ *lnrpc.Channel) {
chanAmt := funding.MaxBtcFundingAmount
pushAmt := btcutil.Amount(100000)
@@ -267,14 +331,7 @@ func basicChannelFundingTest(ht *lntest.HarnessTest,
aliceChannel := ht.GetChannelByChanPoint(alice, chanPoint)
bobChannel := ht.GetChannelByChanPoint(bob, chanPoint)
- closeChan := func() {
- // Finally, immediately close the channel. This function will
- // also block until the channel is closed and will additionally
- // assert the relevant channel closing post conditions.
- ht.CloseChannel(alice, chanPoint)
- }
-
- return aliceChannel, bobChannel, closeChan
+ return aliceChannel, bobChannel
}
// testUnconfirmedChannelFunding tests that our unconfirmed change outputs can
@@ -287,8 +344,7 @@ func testUnconfirmedChannelFunding(ht *lntest.HarnessTest) {
// We'll start off by creating a node for Carol.
carol := ht.NewNode("Carol", nil)
-
- alice := ht.Alice
+ alice := ht.NewNode("Alice", nil)
// We'll send her some unconfirmed funds.
ht.FundCoinsUnconfirmed(2*chanAmt, carol)
@@ -383,38 +439,27 @@ func testUnconfirmedChannelFunding(ht *lntest.HarnessTest) {
// spend and the funding tx.
ht.MineBlocksAndAssertNumTxes(6, 2)
- chanPoint := ht.WaitForChannelOpenEvent(chanOpenUpdate)
+ ht.WaitForChannelOpenEvent(chanOpenUpdate)
// With the channel open, we'll check the balances on each side of the
// channel as a sanity check to ensure things worked out as intended.
checkChannelBalance(carol, carolLocalBalance, pushAmt, 0, 0)
checkChannelBalance(alice, pushAmt, carolLocalBalance, 0, 0)
-
- // TODO(yy): remove the sleep once the following bug is fixed.
- //
- // We may get the error `unable to gracefully close channel while peer
- // is offline (try force closing it instead): channel link not found`.
- // This happens because the channel link hasn't been added yet but we
- // now proceed to closing the channel. We may need to revisit how the
- // channel open event is created and make sure the event is only sent
- // after all relevant states have been updated.
- time.Sleep(2 * time.Second)
-
- // Now that we're done with the test, the channel can be closed.
- ht.CloseChannel(carol, chanPoint)
}
// testChannelFundingInputTypes tests that any type of supported input type can
// be used to fund channels.
func testChannelFundingInputTypes(ht *lntest.HarnessTest) {
+ alice := ht.NewNode("Alice", nil)
+
// We'll start off by creating a node for Carol.
carol := ht.NewNode("Carol", nil)
// Now, we'll connect her to Alice so that they can open a
// channel together.
- ht.ConnectNodes(carol, ht.Alice)
+ ht.ConnectNodes(carol, alice)
- runChannelFundingInputTypes(ht, ht.Alice, carol)
+ runChannelFundingInputTypes(ht, alice, carol)
}
// runChannelFundingInputTypes tests that any type of supported input type can
@@ -606,7 +651,7 @@ func runExternalFundingScriptEnforced(ht *lntest.HarnessTest) {
// At this point, we'll now carry out the normal basic channel funding
// test as everything should now proceed as normal (a regular channel
// funding flow).
- carolChan, daveChan, _ := basicChannelFundingTest(
+ carolChan, daveChan := basicChannelFundingTest(
ht, carol, dave, fundingShim2, false, nil,
)
@@ -722,7 +767,7 @@ func runExternalFundingTaproot(ht *lntest.HarnessTest) {
// At this point, we'll now carry out the normal basic channel funding
// test as everything should now proceed as normal (a regular channel
// funding flow).
- carolChan, daveChan, _ := basicChannelFundingTest(
+ carolChan, daveChan := basicChannelFundingTest(
ht, carol, dave, fundingShim2, true, &commitmentType,
)
@@ -844,7 +889,7 @@ func testChannelFundingPersistence(ht *lntest.HarnessTest) {
}
carol := ht.NewNode("Carol", carolArgs)
- alice := ht.Alice
+ alice := ht.NewNodeWithCoins("Alice", nil)
ht.ConnectNodes(alice, carol)
// Create a new channel that requires 5 confs before it's considered
@@ -935,11 +980,6 @@ func testChannelFundingPersistence(ht *lntest.HarnessTest) {
shortChanID := lnwire.NewShortChanIDFromInt(chanAlice.ChanId)
label = labels.MakeLabel(labels.LabelTypeChannelOpen, &shortChanID)
require.Equal(ht, label, tx.Label, "open channel label not updated")
-
- // Finally, immediately close the channel. This function will also
- // block until the channel is closed and will additionally assert the
- // relevant channel closing post conditions.
- ht.CloseChannel(alice, chanPoint)
}
// testBatchChanFunding makes sure multiple channels can be opened in one batch
@@ -960,8 +1000,8 @@ func testBatchChanFunding(ht *lntest.HarnessTest) {
}
eve := ht.NewNode("eve", scidAliasArgs)
- alice, bob := ht.Alice, ht.Bob
- ht.RestartNodeWithExtraArgs(alice, scidAliasArgs)
+ alice := ht.NewNodeWithCoins("Alice", scidAliasArgs)
+ bob := ht.NewNodeWithCoins("Bob", nil)
// Before we start the test, we'll ensure Alice is connected to Carol
// and Dave, so she can open channels to both of them (and Bob).
@@ -1132,15 +1172,6 @@ func testBatchChanFunding(ht *lntest.HarnessTest) {
chainreg.DefaultBitcoinBaseFeeMSat,
chainreg.DefaultBitcoinFeeRate,
)
-
- // To conclude, we'll close the newly created channel between Carol and
- // Dave. This function will also block until the channel is closed and
- // will additionally assert the relevant channel closing post
- // conditions.
- ht.CloseChannel(alice, chanPoint1)
- ht.CloseChannel(alice, chanPoint2)
- ht.CloseChannel(alice, chanPoint3)
- ht.CloseChannel(alice, chanPoint4)
}
// ensurePolicy ensures that the peer sees alice's channel fee settings.
@@ -1297,6 +1328,7 @@ func testChannelFundingWithUnstableUtxos(ht *lntest.HarnessTest) {
// First, we'll create two new nodes that we'll use to open channel
// between for this test.
carol := ht.NewNode("carol", nil)
+
// We'll attempt at max 2 pending channels, so Dave will need to accept
// two pending ones.
dave := ht.NewNode("dave", []string{
@@ -1329,13 +1361,12 @@ func testChannelFundingWithUnstableUtxos(ht *lntest.HarnessTest) {
// Open a channel to dave with an unconfirmed utxo. Although this utxo
// is unconfirmed it can be used to open a channel because it did not
// originated from the sweeper subsystem.
- update := ht.OpenChannelAssertPending(carol, dave,
+ ht.OpenChannelAssertPending(carol, dave,
lntest.OpenChannelParams{
Amt: chanSize,
SpendUnconfirmed: true,
CommitmentType: cType,
})
- chanPoint1 := lntest.ChanPointFromPendingUpdate(update)
// Verify that both nodes know about the channel.
ht.AssertNumPendingOpenChannels(carol, 1)
@@ -1347,7 +1378,7 @@ func testChannelFundingWithUnstableUtxos(ht *lntest.HarnessTest) {
// so unconfirmed utxos originated from prior channel opening are safe
// to use because channel opening should not be RBFed, at least not for
// now.
- update = ht.OpenChannelAssertPending(carol, dave,
+ update := ht.OpenChannelAssertPending(carol, dave,
lntest.OpenChannelParams{
Amt: chanSize,
SpendUnconfirmed: true,
@@ -1375,9 +1406,6 @@ func testChannelFundingWithUnstableUtxos(ht *lntest.HarnessTest) {
// Make sure Carol sees her to_remote output from the force close tx.
ht.AssertNumPendingSweeps(carol, 1)
- // Mine one block to trigger the sweep transaction.
- ht.MineEmptyBlocks(1)
-
// We need to wait for carol initiating the sweep of the to_remote
// output of chanPoint2.
utxo := ht.AssertNumUTXOsUnconfirmed(carol, 1)[0]
@@ -1435,9 +1463,6 @@ func testChannelFundingWithUnstableUtxos(ht *lntest.HarnessTest) {
// Make sure Carol sees her to_remote output from the force close tx.
ht.AssertNumPendingSweeps(carol, 1)
- // Mine one block to trigger the sweep transaction.
- ht.MineEmptyBlocks(1)
-
// Wait for the to_remote sweep tx to show up in carol's wallet.
ht.AssertNumUTXOsUnconfirmed(carol, 1)
@@ -1472,20 +1497,16 @@ func testChannelFundingWithUnstableUtxos(ht *lntest.HarnessTest) {
// Now after the sweep utxo is confirmed it is stable and can be used
// for channel openings again.
- update = ht.OpenChannelAssertPending(carol, dave,
+ ht.OpenChannelAssertPending(carol, dave,
lntest.OpenChannelParams{
Amt: chanSize,
SpendUnconfirmed: true,
CommitmentType: cType,
})
- chanPoint4 := lntest.ChanPointFromPendingUpdate(update)
// Verify that both nodes know about the channel.
ht.AssertNumPendingOpenChannels(carol, 1)
ht.AssertNumPendingOpenChannels(dave, 1)
ht.MineBlocksAndAssertNumTxes(1, 1)
-
- ht.CloseChannel(carol, chanPoint1)
- ht.CloseChannel(carol, chanPoint4)
}
diff --git a/itest/lnd_hold_invoice_force_test.go b/itest/lnd_hold_invoice_force_test.go
index 7c616e7a50..5fe8ea0daa 100644
--- a/itest/lnd_hold_invoice_force_test.go
+++ b/itest/lnd_hold_invoice_force_test.go
@@ -17,10 +17,11 @@ import (
// would otherwise trigger force closes when they expire.
func testHoldInvoiceForceClose(ht *lntest.HarnessTest) {
// Open a channel between alice and bob.
- alice, bob := ht.Alice, ht.Bob
- chanPoint := ht.OpenChannel(
- alice, bob, lntest.OpenChannelParams{Amt: 300000},
+ chanPoints, nodes := ht.CreateSimpleNetwork(
+ [][]string{nil, nil}, lntest.OpenChannelParams{Amt: 300000},
)
+ alice, bob := nodes[0], nodes[1]
+ chanPoint := chanPoints[0]
// Create a non-dust hold invoice for bob.
var (
@@ -29,7 +30,7 @@ func testHoldInvoiceForceClose(ht *lntest.HarnessTest) {
)
invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
Value: 30000,
- CltvExpiry: 40,
+ CltvExpiry: finalCltvDelta,
Hash: payHash[:],
}
bobInvoice := bob.RPC.AddHoldInvoice(invoiceReq)
@@ -44,14 +45,18 @@ func testHoldInvoiceForceClose(ht *lntest.HarnessTest) {
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
}
- alice.RPC.SendPayment(req)
+ ht.SendPaymentAssertInflight(alice, req)
ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
// Once the HTLC has cleared, alice and bob should both have a single
// htlc locked in.
- ht.AssertActiveHtlcs(alice, payHash[:])
- ht.AssertActiveHtlcs(bob, payHash[:])
+ //
+ // Alice should have one outgoing HTLCs on channel Alice -> Bob.
+ ht.AssertOutgoingHTLCActive(alice, chanPoint, payHash[:])
+
+ // Bob should have one incoming HTLC on channel Alice -> Bob.
+ ht.AssertIncomingHTLCActive(bob, chanPoint, payHash[:])
// Get our htlc expiry height and current block height so that we
// can mine the exact number of blocks required to expire the htlc.
@@ -135,7 +140,4 @@ func testHoldInvoiceForceClose(ht *lntest.HarnessTest) {
// outgoing HTLCs in her channel as the only HTLC has already been
// canceled.
ht.AssertNumPendingForceClose(alice, 0)
-
- // Clean up the channel.
- ht.CloseChannel(alice, chanPoint)
}
diff --git a/itest/lnd_hold_persistence_test.go b/itest/lnd_hold_persistence_test.go
index f1c2a7d823..e8a273f022 100644
--- a/itest/lnd_hold_persistence_test.go
+++ b/itest/lnd_hold_persistence_test.go
@@ -28,12 +28,14 @@ func testHoldInvoicePersistence(ht *lntest.HarnessTest) {
carol := ht.NewNode("Carol", nil)
// Connect Alice to Carol.
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
+ ht.EnsureConnected(alice, bob)
ht.ConnectNodes(alice, carol)
// Open a channel between Alice and Carol which is private so that we
// cover the addition of hop hints for hold invoices.
- chanPointAlice := ht.OpenChannel(
+ ht.OpenChannel(
alice, carol, lntest.OpenChannelParams{
Amt: chanAmt,
Private: true,
@@ -220,8 +222,4 @@ func testHoldInvoicePersistence(ht *lntest.HarnessTest) {
"wrong failure reason")
}
}
-
- // Finally, close all channels.
- ht.CloseChannel(alice, chanPointBob)
- ht.CloseChannel(alice, chanPointAlice)
}
diff --git a/itest/lnd_htlc_test.go b/itest/lnd_htlc_test.go
index fc04fe8d0f..e5825993f9 100644
--- a/itest/lnd_htlc_test.go
+++ b/itest/lnd_htlc_test.go
@@ -14,7 +14,7 @@ import (
func testLookupHtlcResolution(ht *lntest.HarnessTest) {
const chanAmt = btcutil.Amount(1000000)
- alice := ht.Alice
+ alice := ht.NewNodeWithCoins("Alice", nil)
carol := ht.NewNode("Carol", []string{
"--store-final-htlc-resolutions",
})
@@ -24,7 +24,6 @@ func testLookupHtlcResolution(ht *lntest.HarnessTest) {
cp := ht.OpenChannel(
alice, carol, lntest.OpenChannelParams{Amt: chanAmt},
)
- defer ht.CloseChannel(alice, cp)
// Channel should be ready for payments.
const payAmt = 100
diff --git a/itest/lnd_htlc_timeout_resolver_test.go b/itest/lnd_htlc_timeout_resolver_test.go
new file mode 100644
index 0000000000..cd1dd3d483
--- /dev/null
+++ b/itest/lnd_htlc_timeout_resolver_test.go
@@ -0,0 +1,381 @@
+package itest
+
+import (
+ "github.com/btcsuite/btcd/btcutil"
+ "github.com/lightningnetwork/lnd/chainreg"
+ "github.com/lightningnetwork/lnd/lncfg"
+ "github.com/lightningnetwork/lnd/lnrpc"
+ "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
+ "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
+ "github.com/lightningnetwork/lnd/lntest"
+ "github.com/lightningnetwork/lnd/lntest/node"
+ "github.com/lightningnetwork/lnd/routing"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ finalCltvDelta = routing.MinCLTVDelta // 18.
+ thawHeightDelta = finalCltvDelta * 2 // 36.
+)
+
+// makeRouteHints creates a route hints that will allow Carol to be reached
+// using an unadvertised channel created by Bob (Bob -> Carol). If the zeroConf
+// bool is set, then the scid alias of Bob will be used in place.
+func makeRouteHints(bob, carol *node.HarnessNode,
+ zeroConf bool) []*lnrpc.RouteHint {
+
+ carolChans := carol.RPC.ListChannels(
+ &lnrpc.ListChannelsRequest{},
+ )
+
+ carolChan := carolChans.Channels[0]
+
+ hopHint := &lnrpc.HopHint{
+ NodeId: carolChan.RemotePubkey,
+ ChanId: carolChan.ChanId,
+ FeeBaseMsat: uint32(
+ chainreg.DefaultBitcoinBaseFeeMSat,
+ ),
+ FeeProportionalMillionths: uint32(
+ chainreg.DefaultBitcoinFeeRate,
+ ),
+ CltvExpiryDelta: chainreg.DefaultBitcoinTimeLockDelta,
+ }
+
+ if zeroConf {
+ bobChans := bob.RPC.ListChannels(
+ &lnrpc.ListChannelsRequest{},
+ )
+
+ // Now that we have Bob's channels, scan for the channel he has
+ // open to Carol so we can use the proper scid.
+ var found bool
+ for _, bobChan := range bobChans.Channels {
+ if bobChan.RemotePubkey == carol.PubKeyStr {
+ hopHint.ChanId = bobChan.AliasScids[0]
+
+ found = true
+
+ break
+ }
+ }
+ if !found {
+ bob.Fatalf("unable to create route hint")
+ }
+ }
+
+ return []*lnrpc.RouteHint{
+ {
+ HopHints: []*lnrpc.HopHint{hopHint},
+ },
+ }
+}
+
+// testHtlcTimeoutResolverExtractPreimageRemote tests that in the multi-hop
+// setting, Alice->Bob->Carol, when Bob's outgoing HTLC is swept by Carol using
+// the 2nd level success tx2nd level success tx, Bob's timeout resolver will
+// extract the preimage from the sweep tx found in mempool. The 2nd level
+// success tx is broadcast by Carol and spends the outpoint on her commit tx.
+func testHtlcTimeoutResolverExtractPreimageRemote(ht *lntest.HarnessTest) {
+ // For neutrino backend there's no mempool source so we skip it. The
+ // test of extracting preimage from blocks has already been covered in
+ // other tests.
+ if ht.IsNeutrinoBackend() {
+ ht.Skip("skipping neutrino")
+ }
+
+ // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor
+ // is never swept.
+ //
+ // TODO(yy): delete this line once the normal anchor sweeping is
+ // removed.
+ ht.SetMinRelayFeerate(10_000)
+
+ // Create a three hop network: Alice -> Bob -> Carol, using
+ // anchor channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{Amt: chanAmt}
+ cfg := node.CfgAnchor
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ // First, we'll create a three hop network: Alice -> Bob -> Carol, with
+ // Carol refusing to actually settle or directly cancel any HTLC's
+ // self.
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
+ aliceChanPoint, bobChanPoint := chanPoints[0], chanPoints[1]
+
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
+
+ // With the network active, we'll now add a new hodl invoice at Carol's
+ // end. Make sure the cltv expiry delta is large enough, otherwise Bob
+ // won't send out the outgoing htlc.
+ preimage := ht.RandomPreimage()
+ payHash := preimage.Hash()
+ invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
+ Value: 100_000,
+ CltvExpiry: finalCltvDelta,
+ Hash: payHash[:],
+ }
+ eveInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
+
+ // Subscribe the invoice.
+ stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
+
+ // Now that we've created the invoice, we'll send a single payment from
+ // Alice to Carol. We won't wait for the response however, as Carol
+ // will not immediately settle the payment.
+ req := &routerrpc.SendPaymentRequest{
+ PaymentRequest: eveInvoice.PaymentRequest,
+ TimeoutSeconds: 60,
+ FeeLimitMsat: noFeeLimitMsat,
+ }
+ ht.SendPaymentAssertInflight(alice, req)
+
+ // Once the payment sent, Alice should have one outgoing HTLC active.
+ ht.AssertOutgoingHTLCActive(alice, aliceChanPoint, payHash[:])
+
+ // Bob should have two HTLCs active. One incoming HTLC from Alice, and
+ // one outgoing to Carol.
+ ht.AssertIncomingHTLCActive(bob, aliceChanPoint, payHash[:])
+ htlc := ht.AssertOutgoingHTLCActive(bob, bobChanPoint, payHash[:])
+
+ // Carol should have one incoming HTLC from Bob.
+ ht.AssertIncomingHTLCActive(carol, bobChanPoint, payHash[:])
+
+ // Wait for Carol to mark invoice as accepted. There is a small gap to
+ // bridge between adding the htlc to the channel and executing the exit
+ // hop logic.
+ ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
+
+ // Bob now goes offline so the link between Bob and Carol is broken.
+ restartBob := ht.SuspendNode(bob)
+
+ // Carol now settles the invoice, since her link with Bob is broken,
+ // Bob won't know the preimage.
+ carol.RPC.SettleInvoice(preimage[:])
+
+ // We'll now mine enough blocks to trigger Carol's broadcast of her
+ // commitment transaction due to the fact that the HTLC is about to
+ // timeout. With the default incoming broadcast delta of 10, this
+ // will be the htlc expiry height minus 10.
+ numBlocks := padCLTV(uint32(
+ invoiceReq.CltvExpiry - incomingBroadcastDelta,
+ ))
+ ht.MineBlocks(int(numBlocks))
+
+ // Mine the two txns made from Carol,
+ // - the force close tx.
+ // - the anchor sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ // With the closing transaction confirmed, we should expect Carol's
+ // HTLC success transaction to be offered to the sweeper. along with her
+ // anchor output. Note that the anchor output is uneconomical to sweep.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // We should now have Carol's htlc success tx in the mempool.
+ ht.AssertNumTxsInMempool(1)
+
+ // Restart Bob. Once he finishes syncing the channel state, he should
+ // notice the force close from Carol.
+ require.NoError(ht, restartBob())
+
+ // Get the current height to compute number of blocks to mine to
+ // trigger the htlc timeout resolver from Bob.
+ height := ht.CurrentHeight()
+
+ // We'll now mine enough blocks to trigger Bob's timeout resolver.
+ numBlocks = htlc.ExpirationHeight - height -
+ lncfg.DefaultOutgoingBroadcastDelta
+
+ // Mine empty blocks so Carol's htlc success tx stays in mempool. Once
+ // the height is reached, Bob's timeout resolver will resolve the htlc
+ // by extracing the preimage from the mempool.
+ ht.MineEmptyBlocks(int(numBlocks))
+
+ // Finally, check that the Alice's payment is marked as succeeded as
+ // Bob has settled the htlc using the preimage extracted from Carol's
+ // 2nd level success tx.
+ ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
+
+ // Mine a block to clean the mempool.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ // NOTE: for non-standby nodes there's no need to clean up the force
+ // close as long as the mempool is cleaned.
+ ht.CleanShutDown()
+}
+
+// testHtlcTimeoutResolverExtractPreimage tests that in the multi-hop setting,
+// Alice->Bob->Carol, when Bob's outgoing HTLC is swept by Carol using the
+// direct preimage spend, Bob's timeout resolver will extract the preimage from
+// the sweep tx found in mempool. The direct spend tx is broadcast by Carol and
+// spends the outpoint on Bob's commit tx.
+func testHtlcTimeoutResolverExtractPreimageLocal(ht *lntest.HarnessTest) {
+ // For neutrino backend there's no mempool source so we skip it. The
+ // test of extracting preimage from blocks has already been covered in
+ // other tests.
+ if ht.IsNeutrinoBackend() {
+ ht.Skip("skipping neutrino")
+ }
+
+ // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor
+ // is never swept.
+ //
+ // TODO(yy): delete this line once the normal anchor sweeping is
+ // removed.
+ ht.SetMinRelayFeerate(10_000)
+
+ // Create a three hop network: Alice -> Bob -> Carol, using
+ // anchor channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{Amt: chanAmt}
+ cfg := node.CfgAnchor
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ // First, we'll create a three hop network: Alice -> Bob -> Carol, with
+ // Carol refusing to actually settle or directly cancel any HTLC's
+ // self.
+ // Create a three hop network: Alice -> Bob -> Carol.
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
+ aliceChanPoint, bobChanPoint := chanPoints[0], chanPoints[1]
+
+ // With the network active, we'll now add a new hodl invoice at Carol's
+ // end. Make sure the cltv expiry delta is large enough, otherwise Bob
+ // won't send out the outgoing htlc.
+ preimage := ht.RandomPreimage()
+ payHash := preimage.Hash()
+ invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
+ Value: 100_000,
+ CltvExpiry: finalCltvDelta,
+ Hash: payHash[:],
+ }
+ carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
+
+ // Record the height which the invoice will expire.
+ invoiceExpiry := ht.CurrentHeight() + uint32(invoiceReq.CltvExpiry)
+
+ // Subscribe the invoice.
+ stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
+
+ // Now that we've created the invoice, we'll send a single payment from
+ // Alice to Carol. We won't wait for the response however, as Carol
+ // will not immediately settle the payment.
+ req := &routerrpc.SendPaymentRequest{
+ PaymentRequest: carolInvoice.PaymentRequest,
+ TimeoutSeconds: 60,
+ FeeLimitMsat: noFeeLimitMsat,
+ }
+ ht.SendPaymentAssertInflight(alice, req)
+
+ // Once the payment sent, Alice should have one outgoing HTLC active.
+ ht.AssertOutgoingHTLCActive(alice, aliceChanPoint, payHash[:])
+
+ // Bob should have two HTLCs active. One incoming HTLC from Alice, and
+ // one outgoing to Carol.
+ ht.AssertIncomingHTLCActive(bob, aliceChanPoint, payHash[:])
+ ht.AssertOutgoingHTLCActive(bob, bobChanPoint, payHash[:])
+
+ // Carol should have one incoming HTLC from Bob.
+ ht.AssertIncomingHTLCActive(carol, bobChanPoint, payHash[:])
+
+ // Wait for Carol to mark invoice as accepted. There is a small gap to
+ // bridge between adding the htlc to the channel and executing the exit
+ // hop logic.
+ ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
+
+ // Bob now goes offline so the link between Bob and Carol is broken.
+ restartBob := ht.SuspendNode(bob)
+
+ // Carol now settles the invoice, since her link with Bob is broken,
+ // Bob won't know the preimage.
+ carol.RPC.SettleInvoice(preimage[:])
+
+ // Stop Carol so it's easier to check the mempool's state since she
+ // will broadcast the anchor sweeping once Bob force closes.
+ restartCarol := ht.SuspendNode(carol)
+
+ // Restart Bob to force close the channel.
+ require.NoError(ht, restartBob())
+
+ // Bob force closes the channel, which gets his commitment tx into the
+ // mempool.
+ ht.CloseChannelAssertPending(bob, bobChanPoint, true)
+
+ // Mine Bob's force close tx.
+ ht.MineClosingTx(bobChanPoint)
+
+ // Once Bob's force closing tx is confirmed, he will re-offer the
+ // anchor output to his sweeper, which won't be swept due to it being
+ // uneconomical.
+ ht.AssertNumPendingSweeps(bob, 1)
+
+ // Mine 3 blocks so the output will be offered to the sweeper.
+ ht.MineBlocks(defaultCSV - 1)
+
+ // Bob should have two pending sweeps now,
+ // - the commit output.
+ // - the anchor output, uneconomical.
+ ht.AssertNumPendingSweeps(bob, 2)
+
+ // Mine a block to confirm Bob's sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ ht.Logf("Invoice expire height: %d, current: %d", invoiceExpiry,
+ ht.CurrentHeight())
+
+ // We'll now mine enough blocks to trigger Carol's sweeping of the htlc
+ // via the direct spend.
+ numBlocks := padCLTV(
+ invoiceExpiry - ht.CurrentHeight() - incomingBroadcastDelta,
+ )
+ ht.MineBlocks(int(numBlocks))
+
+ // Restart Carol to sweep the htlc output.
+ require.NoError(ht, restartCarol())
+
+ // With the above blocks mined, we should expect Carol's to offer the
+ // htlc output on Bob's commitment to the sweeper.
+ //
+ // Carol should two pending sweeps,
+ // - htlc output.
+ // - anchor output, uneconomical.
+ ht.AssertNumPendingSweeps(carol, 2)
+
+ // Check the current mempool state and we should see,
+ // - Carol's direct spend tx, which contains the preimage.
+ // - Carol's anchor sweep tx cannot be broadcast as it's uneconomical.
+ ht.AssertNumTxsInMempool(1)
+
+ // We'll now mine enough blocks to trigger Bob's htlc timeout resolver
+ // to act. Once his timeout resolver starts, it will extract the
+ // preimage from Carol's direct spend tx found in the mempool.
+ resp := ht.AssertNumPendingForceClose(bob, 1)[0]
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+ ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+
+ "htlc=%v", resp.BlocksTilMaturity,
+ resp.PendingHtlcs[0].BlocksTilMaturity)
+
+ // Mine empty blocks so Carol's direct spend tx stays in mempool. Once
+ // the height is reached, Bob's timeout resolver will resolve the htlc
+ // by extracing the preimage from the mempool.
+ //
+ // TODO(yy): there's no need to wait till the HTLC's CLTV is reached,
+ // Bob's outgoing contest resolver can also monitor the mempool and
+ // resolve the payment even earlier.
+ ht.MineEmptyBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity))
+
+ // Finally, check that the Alice's payment is marked as succeeded as
+ // Bob has settled the htlc using the preimage extracted from Carol's
+ // direct spend tx.
+ ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
+
+ // NOTE: for non-standby nodes there's no need to clean up the force
+ // close as long as the mempool is cleaned.
+ ht.CleanShutDown()
+}
diff --git a/itest/lnd_invoice_acceptor_test.go b/itest/lnd_invoice_acceptor_test.go
index 97d14650c1..273402c67d 100644
--- a/itest/lnd_invoice_acceptor_test.go
+++ b/itest/lnd_invoice_acceptor_test.go
@@ -32,7 +32,7 @@ func testInvoiceHtlcModifierBasic(ht *lntest.HarnessTest) {
{Local: bob, Remote: carol, Param: p},
}
resp := ht.OpenMultiChannelsAsync(reqs)
- cpAB, cpBC := resp[0], resp[1]
+ cpBC := resp[1]
// Make sure Alice is aware of channel Bob=>Carol.
ht.AssertChannelInGraph(alice, cpBC)
@@ -204,10 +204,6 @@ func testInvoiceHtlcModifierBasic(ht *lntest.HarnessTest) {
}
cancelModifier()
-
- // Finally, close channels.
- ht.CloseChannel(alice, cpAB)
- ht.CloseChannel(bob, cpBC)
}
// acceptorTestCase is a helper struct to hold test case data.
@@ -247,7 +243,8 @@ type acceptorTestScenario struct {
//
// Among them, Alice and Bob are standby nodes and Carol is a new node.
func newAcceptorTestScenario(ht *lntest.HarnessTest) *acceptorTestScenario {
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("bob", nil)
carol := ht.NewNode("carol", nil)
ht.EnsureConnected(alice, bob)
diff --git a/itest/lnd_macaroons_test.go b/itest/lnd_macaroons_test.go
index 3adfafffe9..b896d455ac 100644
--- a/itest/lnd_macaroons_test.go
+++ b/itest/lnd_macaroons_test.go
@@ -29,7 +29,7 @@ func testMacaroonAuthentication(ht *lntest.HarnessTest) {
newAddrReq = &lnrpc.NewAddressRequest{
Type: AddrTypeWitnessPubkeyHash,
}
- testNode = ht.Alice
+ testNode = ht.NewNode("Alice", nil)
testClient = testNode.RPC.LN
)
@@ -295,7 +295,7 @@ func testMacaroonAuthentication(ht *lntest.HarnessTest) {
// in the request must be set correctly, and the baked macaroon has the intended
// permissions.
func testBakeMacaroon(ht *lntest.HarnessTest) {
- var testNode = ht.Alice
+ var testNode = ht.NewNode("Alice", nil)
testCases := []struct {
name string
@@ -521,7 +521,7 @@ func testBakeMacaroon(ht *lntest.HarnessTest) {
func testDeleteMacaroonID(ht *lntest.HarnessTest) {
var (
ctxb = ht.Context()
- testNode = ht.Alice
+ testNode = ht.NewNode("Alice", nil)
)
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
diff --git a/itest/lnd_max_channel_size_test.go b/itest/lnd_max_channel_size_test.go
index 9959c75895..4e19f86538 100644
--- a/itest/lnd_max_channel_size_test.go
+++ b/itest/lnd_max_channel_size_test.go
@@ -56,9 +56,7 @@ func testMaxChannelSize(ht *lntest.HarnessTest) {
// Creating a wumbo channel between these two nodes should succeed.
ht.EnsureConnected(wumboNode, wumboNode3)
- chanPoint := ht.OpenChannel(
+ ht.OpenChannel(
wumboNode, wumboNode3, lntest.OpenChannelParams{Amt: chanAmt},
)
-
- ht.CloseChannel(wumboNode, chanPoint)
}
diff --git a/itest/lnd_max_htlcs_test.go b/itest/lnd_max_htlcs_test.go
index 971537adf4..6c4eb85946 100644
--- a/itest/lnd_max_htlcs_test.go
+++ b/itest/lnd_max_htlcs_test.go
@@ -19,25 +19,20 @@ func testMaxHtlcPathfind(ht *lntest.HarnessTest) {
// Bob to add a maximum of 5 htlcs to her commitment.
maxHtlcs := 5
- alice, bob := ht.Alice, ht.Bob
-
- // Restart nodes with the new flag so they understand the new payment
+ // Create nodes with the new flag so they understand the new payment
// status.
- ht.RestartNodeWithExtraArgs(alice, []string{
- "--routerrpc.usestatusinitiated",
- })
- ht.RestartNodeWithExtraArgs(bob, []string{
- "--routerrpc.usestatusinitiated",
- })
-
- ht.EnsureConnected(alice, bob)
- chanPoint := ht.OpenChannel(
- alice, bob, lntest.OpenChannelParams{
+ cfg := []string{"--routerrpc.usestatusinitiated"}
+ cfgs := [][]string{cfg, cfg}
+
+ // Create a channel Alice->Bob.
+ _, nodes := ht.CreateSimpleNetwork(
+ cfgs, lntest.OpenChannelParams{
Amt: 1000000,
PushAmt: 800000,
RemoteMaxHtlcs: uint16(maxHtlcs),
},
)
+ alice, bob := nodes[0], nodes[1]
// Alice and bob should have one channel open with each other now.
ht.AssertNodeNumChannels(alice, 1)
@@ -82,8 +77,6 @@ func testMaxHtlcPathfind(ht *lntest.HarnessTest) {
ht.AssertNumActiveHtlcs(alice, 0)
ht.AssertNumActiveHtlcs(bob, 0)
-
- ht.CloseChannel(alice, chanPoint)
}
type holdSubscription struct {
diff --git a/itest/lnd_misc_test.go b/itest/lnd_misc_test.go
index 02e3386e6a..30dba0a878 100644
--- a/itest/lnd_misc_test.go
+++ b/itest/lnd_misc_test.go
@@ -10,7 +10,6 @@ import (
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcwallet/wallet"
- "github.com/lightningnetwork/lnd/chainreg"
"github.com/lightningnetwork/lnd/funding"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lncfg"
@@ -39,9 +38,8 @@ func testDisconnectingTargetPeer(ht *lntest.HarnessTest) {
"--maxbackoff=1m",
}
- alice, bob := ht.Alice, ht.Bob
- ht.RestartNodeWithExtraArgs(alice, args)
- ht.RestartNodeWithExtraArgs(bob, args)
+ alice := ht.NewNodeWithCoins("Alice", args)
+ bob := ht.NewNodeWithCoins("Bob", args)
// Start by connecting Alice and Bob with no channels.
ht.EnsureConnected(alice, bob)
@@ -157,7 +155,6 @@ func testSphinxReplayPersistence(ht *lntest.HarnessTest) {
Amt: chanAmt,
},
)
- defer ht.CloseChannel(fred, chanPointFC)
// Now that the channel is open, create an invoice for Dave which
// expects a payment of 1000 satoshis from Carol paid via a particular
@@ -226,9 +223,6 @@ func testSphinxReplayPersistence(ht *lntest.HarnessTest) {
// unaltered.
ht.AssertAmountPaid("carol => dave", carol, chanPoint, 0, 0)
ht.AssertAmountPaid("dave <= carol", dave, chanPoint, 0, 0)
-
- // Cleanup by mining the force close and sweep transaction.
- ht.ForceCloseChannel(carol, chanPoint)
}
// testListChannels checks that the response from ListChannels is correct. It
@@ -239,17 +233,11 @@ func testListChannels(ht *lntest.HarnessTest) {
const aliceRemoteMaxHtlcs = 50
const bobRemoteMaxHtlcs = 100
- // Get the standby nodes and open a channel between them.
- alice, bob := ht.Alice, ht.Bob
-
args := []string{fmt.Sprintf(
"--default-remote-max-htlcs=%v",
bobRemoteMaxHtlcs,
)}
- ht.RestartNodeWithExtraArgs(bob, args)
-
- // Connect Alice to Bob.
- ht.EnsureConnected(alice, bob)
+ cfgs := [][]string{nil, args}
// Open a channel with 100k satoshis between Alice and Bob with Alice
// being the sole funder of the channel. The minial HTLC amount is set
@@ -264,8 +252,10 @@ func testListChannels(ht *lntest.HarnessTest) {
MinHtlc: customizedMinHtlc,
RemoteMaxHtlcs: aliceRemoteMaxHtlcs,
}
- chanPoint := ht.OpenChannel(alice, bob, p)
- defer ht.CloseChannel(alice, chanPoint)
+
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, p)
+ alice, bob := nodes[0], nodes[1]
+ chanPoint := chanPoints[0]
// Alice should have one channel opened with Bob.
ht.AssertNodeNumChannels(alice, 1)
@@ -369,7 +359,7 @@ func testMaxPendingChannels(ht *lntest.HarnessTest) {
}
carol := ht.NewNode("Carol", args)
- alice := ht.Alice
+ alice := ht.NewNodeWithCoins("Alice", nil)
ht.ConnectNodes(alice, carol)
carolBalance := btcutil.Amount(maxPendingChannels) * amount
@@ -425,12 +415,6 @@ func testMaxPendingChannels(ht *lntest.HarnessTest) {
chanPoints[i] = fundingChanPoint
}
-
- // Next, close the channel between Alice and Carol, asserting that the
- // channel has been properly closed on-chain.
- for _, chanPoint := range chanPoints {
- ht.CloseChannel(alice, chanPoint)
- }
}
// testGarbageCollectLinkNodes tests that we properly garbage collect link
@@ -439,7 +423,9 @@ func testMaxPendingChannels(ht *lntest.HarnessTest) {
func testGarbageCollectLinkNodes(ht *lntest.HarnessTest) {
const chanAmt = 1000000
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
+ ht.EnsureConnected(alice, bob)
// Open a channel between Alice and Bob which will later be
// cooperatively closed.
@@ -467,7 +453,7 @@ func testGarbageCollectLinkNodes(ht *lntest.HarnessTest) {
dave := ht.NewNode("Dave", nil)
ht.ConnectNodes(alice, dave)
- persistentChanPoint := ht.OpenChannel(
+ ht.OpenChannel(
alice, dave, lntest.OpenChannelParams{
Amt: chanAmt,
},
@@ -519,12 +505,6 @@ func testGarbageCollectLinkNodes(ht *lntest.HarnessTest) {
// close the channel instead.
ht.ForceCloseChannel(alice, forceCloseChanPoint)
- // We'll need to mine some blocks in order to mark the channel fully
- // closed.
- ht.MineBlocks(
- chainreg.DefaultBitcoinTimeLockDelta - defaultCSV,
- )
-
// Before we test reconnection, we'll ensure that the channel has been
// fully cleaned up for both Carol and Alice.
ht.AssertNumPendingForceClose(alice, 0)
@@ -540,9 +520,6 @@ func testGarbageCollectLinkNodes(ht *lntest.HarnessTest) {
"did not expect to find bob in the channel graph, but did")
require.NotContains(ht, channelGraph.Nodes, carol.PubKeyStr,
"did not expect to find carol in the channel graph, but did")
-
- // Now that the test is done, we can also close the persistent link.
- ht.CloseChannel(alice, persistentChanPoint)
}
// testRejectHTLC tests that a node can be created with the flag --rejecthtlc.
@@ -553,7 +530,8 @@ func testRejectHTLC(ht *lntest.HarnessTest) {
// Alice ------> Carol ------> Bob
//
const chanAmt = btcutil.Amount(1000000)
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
// Create Carol with reject htlc flag.
carol := ht.NewNode("Carol", []string{"--rejecthtlc"})
@@ -568,14 +546,14 @@ func testRejectHTLC(ht *lntest.HarnessTest) {
ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
// Open a channel between Alice and Carol.
- chanPointAlice := ht.OpenChannel(
+ ht.OpenChannel(
alice, carol, lntest.OpenChannelParams{
Amt: chanAmt,
},
)
// Open a channel between Carol and Bob.
- chanPointCarol := ht.OpenChannel(
+ ht.OpenChannel(
carol, bob, lntest.OpenChannelParams{
Amt: chanAmt,
},
@@ -632,14 +610,12 @@ func testRejectHTLC(ht *lntest.HarnessTest) {
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
}
- payStream := alice.RPC.SendPayment(paymentReq)
- ht.AssertPaymentStatusFromStream(payStream, lnrpc.Payment_FAILED)
+ ht.SendPaymentAssertFail(
+ alice, paymentReq,
+ lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE,
+ )
ht.AssertLastHTLCError(alice, lnrpc.Failure_CHANNEL_DISABLED)
-
- // Close all channels.
- ht.CloseChannel(alice, chanPointAlice)
- ht.CloseChannel(carol, chanPointCarol)
}
// testNodeSignVerify checks that only connected nodes are allowed to perform
@@ -647,15 +623,15 @@ func testRejectHTLC(ht *lntest.HarnessTest) {
func testNodeSignVerify(ht *lntest.HarnessTest) {
chanAmt := funding.MaxBtcFundingAmount
pushAmt := btcutil.Amount(100000)
- alice, bob := ht.Alice, ht.Bob
+ p := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ PushAmt: pushAmt,
+ }
// Create a channel between alice and bob.
- aliceBobCh := ht.OpenChannel(
- alice, bob, lntest.OpenChannelParams{
- Amt: chanAmt,
- PushAmt: pushAmt,
- },
- )
+ cfgs := [][]string{nil, nil}
+ _, nodes := ht.CreateSimpleNetwork(cfgs, p)
+ alice, bob := nodes[0], nodes[1]
// alice signs "alice msg" and sends her signature to bob.
aliceMsg := []byte("alice msg")
@@ -683,23 +659,23 @@ func testNodeSignVerify(ht *lntest.HarnessTest) {
require.False(ht, verifyResp.Valid, "carol's signature didn't validate")
require.Equal(ht, verifyResp.Pubkey, carol.PubKeyStr,
"carol's signature doesn't contain alice's pubkey.")
-
- // Close the channel between alice and bob.
- ht.CloseChannel(alice, aliceBobCh)
}
// testAbandonChannel abandons a channel and asserts that it is no longer open
// and not in one of the pending closure states. It also verifies that the
// abandoned channel is reported as closed with close type 'abandoned'.
func testAbandonChannel(ht *lntest.HarnessTest) {
- alice, bob := ht.Alice, ht.Bob
-
// First establish a channel between Alice and Bob.
channelParam := lntest.OpenChannelParams{
Amt: funding.MaxBtcFundingAmount,
PushAmt: btcutil.Amount(100000),
}
- chanPoint := ht.OpenChannel(alice, bob, channelParam)
+
+ // Create a channel between alice and bob.
+ cfgs := [][]string{nil, nil}
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, channelParam)
+ alice := nodes[0]
+ chanPoint := chanPoints[0]
// Now that the channel is open, we'll obtain its channel ID real quick
// so we can use it to query the graph below.
@@ -751,16 +727,13 @@ func testAbandonChannel(ht *lntest.HarnessTest) {
// Calling AbandonChannel again, should result in no new errors, as the
// channel has already been removed.
alice.RPC.AbandonChannel(abandonChannelRequest)
-
- // Now that we're done with the test, the channel can be closed. This
- // is necessary to avoid unexpected outcomes of other tests that use
- // Bob's lnd instance.
- ht.ForceCloseChannel(bob, chanPoint)
}
// testSendAllCoins tests that we're able to properly sweep all coins from the
// wallet into a single target address at the specified fee rate.
func testSendAllCoins(ht *lntest.HarnessTest) {
+ alice := ht.NewNodeWithCoins("Alice", nil)
+
// First, we'll make a new node, Ainz who'll we'll use to test wallet
// sweeping.
//
@@ -787,7 +760,7 @@ func testSendAllCoins(ht *lntest.HarnessTest) {
// Ensure that we can't send coins to another user's Pubkey.
err = ainz.RPC.SendCoinsAssertErr(&lnrpc.SendCoinsRequest{
- Addr: ht.Alice.RPC.GetInfo().IdentityPubkey,
+ Addr: alice.RPC.GetInfo().IdentityPubkey,
SendAll: true,
Label: sendCoinsLabel,
TargetConf: 6,
@@ -1158,7 +1131,8 @@ func assertChannelConstraintsEqual(ht *lntest.HarnessTest,
// on a message with a provided address.
func testSignVerifyMessageWithAddr(ht *lntest.HarnessTest) {
// Using different nodes to sign the message and verify the signature.
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNode("Alice,", nil)
+ bob := ht.NewNode("Bob,", nil)
// Test an lnd wallet created P2WKH address.
respAddr := alice.RPC.NewAddress(&lnrpc.NewAddressRequest{
@@ -1275,7 +1249,7 @@ func testSignVerifyMessageWithAddr(ht *lntest.HarnessTest) {
// up with native SQL enabled, as we don't currently support migration of KV
// invoices to the new SQL schema.
func testNativeSQLNoMigration(ht *lntest.HarnessTest) {
- alice := ht.Alice
+ alice := ht.NewNode("Alice", nil)
// Make sure we run the test with SQLite or Postgres.
if alice.Cfg.DBBackend != node.BackendSqlite &&
diff --git a/itest/lnd_mpp_test.go b/itest/lnd_mpp_test.go
index 800708569d..7bc23da2a8 100644
--- a/itest/lnd_mpp_test.go
+++ b/itest/lnd_mpp_test.go
@@ -1,6 +1,7 @@
package itest
import (
+ "encoding/hex"
"time"
"github.com/btcsuite/btcd/btcutil"
@@ -14,6 +15,110 @@ import (
"github.com/stretchr/testify/require"
)
+// testSendMultiPathPayment tests that we are able to successfully route a
+// payment using multiple shards across different paths.
+func testSendMultiPathPayment(ht *lntest.HarnessTest) {
+ mts := newMppTestScenario(ht)
+
+ const paymentAmt = btcutil.Amount(300000)
+
+ // Set up a network with three different paths Alice <-> Bob. Channel
+ // capacities are set such that the payment can only succeed if (at
+ // least) three paths are used.
+ //
+ // _ Eve _
+ // / \
+ // Alice -- Carol ---- Bob
+ // \ /
+ // \__ Dave ____/
+ //
+ req := &mppOpenChannelRequest{
+ amtAliceCarol: 285000,
+ amtAliceDave: 155000,
+ amtCarolBob: 200000,
+ amtCarolEve: 155000,
+ amtDaveBob: 155000,
+ amtEveBob: 155000,
+ }
+ mts.openChannels(req)
+ chanPointAliceDave := mts.channelPoints[1]
+
+ // Increase Dave's fee to make the test deterministic. Otherwise, it
+ // would be unpredictable whether pathfinding would go through Charlie
+ // or Dave for the first shard.
+ expectedPolicy := &lnrpc.RoutingPolicy{
+ FeeBaseMsat: 500_000,
+ FeeRateMilliMsat: int64(0.001 * 1_000_000),
+ TimeLockDelta: 40,
+ MinHtlc: 1000, // default value
+ MaxHtlcMsat: 133_650_000,
+ }
+ mts.dave.UpdateGlobalPolicy(expectedPolicy)
+
+ // Make sure Alice has heard it.
+ ht.AssertChannelPolicyUpdate(
+ mts.alice, mts.dave, expectedPolicy, chanPointAliceDave, false,
+ )
+
+ // Our first test will be Alice paying Bob using a SendPayment call.
+ // Let Bob create an invoice for Alice to pay.
+ payReqs, rHashes, invoices := ht.CreatePayReqs(mts.bob, paymentAmt, 1)
+
+ rHash := rHashes[0]
+ payReq := payReqs[0]
+
+ sendReq := &routerrpc.SendPaymentRequest{
+ PaymentRequest: payReq,
+ MaxParts: 10,
+ TimeoutSeconds: 60,
+ FeeLimitMsat: noFeeLimitMsat,
+ }
+ payment := ht.SendPaymentAssertSettled(mts.alice, sendReq)
+
+ // Make sure we got the preimage.
+ require.Equal(ht, hex.EncodeToString(invoices[0].RPreimage),
+ payment.PaymentPreimage, "preimage doesn't match")
+
+ // Check that Alice split the payment in at least three shards. Because
+ // the hand-off of the htlc to the link is asynchronous (via a mailbox),
+ // there is some non-determinism in the process. Depending on whether
+ // the new pathfinding round is started before or after the htlc is
+ // locked into the channel, different sharding may occur. Therefore we
+ // can only check if the number of shards isn't below the theoretical
+ // minimum.
+ succeeded := 0
+ for _, htlc := range payment.Htlcs {
+ if htlc.Status == lnrpc.HTLCAttempt_SUCCEEDED {
+ succeeded++
+ }
+ }
+
+ const minExpectedShards = 3
+ require.GreaterOrEqual(ht, succeeded, minExpectedShards,
+ "expected shards not reached")
+
+ // Make sure Bob show the invoice as settled for the full amount.
+ inv := mts.bob.RPC.LookupInvoice(rHash)
+
+ require.EqualValues(ht, paymentAmt, inv.AmtPaidSat,
+ "incorrect payment amt")
+
+ require.Equal(ht, lnrpc.Invoice_SETTLED, inv.State,
+ "Invoice not settled")
+
+ settled := 0
+ for _, htlc := range inv.Htlcs {
+ if htlc.State == lnrpc.InvoiceHTLCState_SETTLED {
+ settled++
+ }
+ }
+ require.Equal(ht, succeeded, settled,
+ "num of HTLCs wrong")
+
+ // Finally, close all channels.
+ mts.closeChannels()
+}
+
// testSendToRouteMultiPath tests that we are able to successfully route a
// payment using multiple shards across different paths, by using SendToRoute.
func testSendToRouteMultiPath(ht *lntest.HarnessTest) {
@@ -180,8 +285,8 @@ type mppTestScenario struct {
// \ /
// \__ Dave ____/
func newMppTestScenario(ht *lntest.HarnessTest) *mppTestScenario {
- alice, bob := ht.Alice, ht.Bob
- ht.RestartNodeWithExtraArgs(bob, []string{
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", []string{
"--maxpendingchannels=2",
"--accept-amp",
})
@@ -299,7 +404,7 @@ func (m *mppTestScenario) openChannels(r *mppOpenChannelRequest) {
}
// Each node should have exactly 6 edges.
- m.ht.AssertNumActiveEdges(hn, len(m.channelPoints), false)
+ m.ht.AssertNumEdges(hn, len(m.channelPoints), false)
}
}
diff --git a/itest/lnd_multi-hop-error-propagation_test.go b/itest/lnd_multi-hop-error-propagation_test.go
index 6fe34fdcfc..8d20a8fc3b 100644
--- a/itest/lnd_multi-hop-error-propagation_test.go
+++ b/itest/lnd_multi-hop-error-propagation_test.go
@@ -15,7 +15,9 @@ func testHtlcErrorPropagation(ht *lntest.HarnessTest) {
// multi-hop payment.
const chanAmt = funding.MaxBtcFundingAmount
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
+ ht.EnsureConnected(alice, bob)
// Since we'd like to test some multi-hop failure scenarios, we'll
// introduce another node into our test network: Carol.
@@ -363,12 +365,4 @@ func testHtlcErrorPropagation(ht *lntest.HarnessTest) {
ht.AssertHtlcEventTypes(
bobEvents, routerrpc.HtlcEvent_UNKNOWN, lntest.HtlcEventFinal,
)
-
- // Finally, immediately close the channel. This function will also
- // block until the channel is closed and will additionally assert the
- // relevant channel closing post conditions.
- ht.CloseChannel(alice, chanPointAlice)
-
- // Force close Bob's final channel.
- ht.ForceCloseChannel(bob, chanPointBob)
}
diff --git a/itest/lnd_multi-hop-payments_test.go b/itest/lnd_multi-hop-payments_test.go
index ba7797d7fc..e965140523 100644
--- a/itest/lnd_multi-hop-payments_test.go
+++ b/itest/lnd_multi-hop-payments_test.go
@@ -18,7 +18,8 @@ func testMultiHopPayments(ht *lntest.HarnessTest) {
// channel with Alice, and Carol with Dave. After this setup, the
// network topology should now look like:
// Carol -> Dave -> Alice -> Bob
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
daveArgs := []string{"--protocol.legacy.onion"}
dave := ht.NewNode("Dave", daveArgs)
@@ -37,6 +38,7 @@ func testMultiHopPayments(ht *lntest.HarnessTest) {
ht.AssertHtlcEventType(daveEvents, routerrpc.HtlcEvent_UNKNOWN)
// Connect the nodes.
+ ht.ConnectNodes(alice, bob)
ht.ConnectNodes(dave, alice)
ht.ConnectNodes(carol, dave)
@@ -233,11 +235,6 @@ func testMultiHopPayments(ht *lntest.HarnessTest) {
ht.AssertHtlcEvents(
bobEvents, 0, 0, numPayments, 0, routerrpc.HtlcEvent_RECEIVE,
)
-
- // Finally, close all channels.
- ht.CloseChannel(alice, chanPointAlice)
- ht.CloseChannel(dave, chanPointDave)
- ht.CloseChannel(carol, chanPointCarol)
}
// updateChannelPolicy updates the channel policy of node to the given fees and
diff --git a/itest/lnd_multi-hop_force_close_test.go b/itest/lnd_multi-hop_force_close_test.go
new file mode 100644
index 0000000000..4622212142
--- /dev/null
+++ b/itest/lnd_multi-hop_force_close_test.go
@@ -0,0 +1,3151 @@
+package itest
+
+import (
+ "github.com/btcsuite/btcd/btcutil"
+ "github.com/lightningnetwork/lnd/lncfg"
+ "github.com/lightningnetwork/lnd/lnrpc"
+ "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
+ "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
+ "github.com/lightningnetwork/lnd/lntest"
+ "github.com/lightningnetwork/lnd/lntest/node"
+ "github.com/lightningnetwork/lnd/lntest/rpc"
+ "github.com/lightningnetwork/lnd/lntypes"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ chanAmt = 1_000_000
+ invoiceAmt = 100_000
+ htlcAmt = btcutil.Amount(300_000)
+
+ incomingBroadcastDelta = lncfg.DefaultIncomingBroadcastDelta
+)
+
+var leasedType = lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE
+
+// multiHopForceCloseTestCases defines a set of tests that focuses on the
+// behavior of the force close in a multi-hop scenario.
+//
+//nolint:lll
+var multiHopForceCloseTestCases = []*lntest.TestCase{
+ {
+ Name: "multihop local claim outgoing htlc anchor",
+ TestFunc: testLocalClaimOutgoingHTLCAnchor,
+ },
+ {
+ Name: "multihop local claim outgoing htlc anchor zero conf",
+ TestFunc: testLocalClaimOutgoingHTLCAnchorZeroConf,
+ },
+ {
+ Name: "multihop local claim outgoing htlc simple taproot",
+ TestFunc: testLocalClaimOutgoingHTLCSimpleTaproot,
+ },
+ {
+ Name: "multihop local claim outgoing htlc simple taproot zero conf",
+ TestFunc: testLocalClaimOutgoingHTLCSimpleTaprootZeroConf,
+ },
+ {
+ Name: "multihop local claim outgoing htlc leased",
+ TestFunc: testLocalClaimOutgoingHTLCLeased,
+ },
+ {
+ Name: "multihop local claim outgoing htlc leased zero conf",
+ TestFunc: testLocalClaimOutgoingHTLCLeasedZeroConf,
+ },
+ {
+ Name: "multihop receiver preimage claim anchor",
+ TestFunc: testMultiHopReceiverPreimageClaimAnchor,
+ },
+ {
+ Name: "multihop receiver preimage claim anchor zero conf",
+ TestFunc: testMultiHopReceiverPreimageClaimAnchorZeroConf,
+ },
+ {
+ Name: "multihop receiver preimage claim simple taproot",
+ TestFunc: testMultiHopReceiverPreimageClaimSimpleTaproot,
+ },
+ {
+ Name: "multihop receiver preimage claim simple taproot zero conf",
+ TestFunc: testMultiHopReceiverPreimageClaimSimpleTaprootZeroConf,
+ },
+ {
+ Name: "multihop receiver preimage claim leased",
+ TestFunc: testMultiHopReceiverPreimageClaimLeased,
+ },
+ {
+ Name: "multihop receiver preimage claim leased zero conf",
+ TestFunc: testMultiHopReceiverPreimageClaimLeasedZeroConf,
+ },
+ {
+ Name: "multihop local force close before timeout anchor",
+ TestFunc: testLocalForceCloseBeforeTimeoutAnchor,
+ },
+ {
+ Name: "multihop local force close before timeout anchor zero conf",
+ TestFunc: testLocalForceCloseBeforeTimeoutAnchorZeroConf,
+ },
+ {
+ Name: "multihop local force close before timeout simple taproot",
+ TestFunc: testLocalForceCloseBeforeTimeoutSimpleTaproot,
+ },
+ {
+ Name: "multihop local force close before timeout simple taproot zero conf",
+ TestFunc: testLocalForceCloseBeforeTimeoutSimpleTaprootZeroConf,
+ },
+ {
+ Name: "multihop local force close before timeout leased",
+ TestFunc: testLocalForceCloseBeforeTimeoutLeased,
+ },
+ {
+ Name: "multihop local force close before timeout leased zero conf",
+ TestFunc: testLocalForceCloseBeforeTimeoutLeasedZeroConf,
+ },
+ {
+ Name: "multihop remote force close before timeout anchor",
+ TestFunc: testRemoteForceCloseBeforeTimeoutAnchor,
+ },
+ {
+ Name: "multihop remote force close before timeout anchor zero conf",
+ TestFunc: testRemoteForceCloseBeforeTimeoutAnchorZeroConf,
+ },
+ {
+ Name: "multihop remote force close before timeout simple taproot",
+ TestFunc: testRemoteForceCloseBeforeTimeoutSimpleTaproot,
+ },
+ {
+ Name: "multihop remote force close before timeout simple taproot zero conf",
+ TestFunc: testRemoteForceCloseBeforeTimeoutSimpleTaprootZeroConf,
+ },
+ {
+ Name: "multihop remote force close before timeout leased",
+ TestFunc: testRemoteForceCloseBeforeTimeoutLeased,
+ },
+ {
+ Name: "multihop remote force close before timeout leased zero conf",
+ TestFunc: testRemoteForceCloseBeforeTimeoutLeasedZeroConf,
+ },
+ {
+ Name: "multihop local claim incoming htlc anchor",
+ TestFunc: testLocalClaimIncomingHTLCAnchor,
+ },
+ {
+ Name: "multihop local claim incoming htlc anchor zero conf",
+ TestFunc: testLocalClaimIncomingHTLCAnchorZeroConf,
+ },
+ {
+ Name: "multihop local claim incoming htlc simple taproot",
+ TestFunc: testLocalClaimIncomingHTLCSimpleTaproot,
+ },
+ {
+ Name: "multihop local claim incoming htlc simple taproot zero conf",
+ TestFunc: testLocalClaimIncomingHTLCSimpleTaprootZeroConf,
+ },
+ {
+ Name: "multihop local claim incoming htlc leased",
+ TestFunc: testLocalClaimIncomingHTLCLeased,
+ },
+ {
+ Name: "multihop local claim incoming htlc leased zero conf",
+ TestFunc: testLocalClaimIncomingHTLCLeasedZeroConf,
+ },
+ {
+ Name: "multihop local preimage claim anchor",
+ TestFunc: testLocalPreimageClaimAnchor,
+ },
+ {
+ Name: "multihop local preimage claim anchor zero conf",
+ TestFunc: testLocalPreimageClaimAnchorZeroConf,
+ },
+ {
+ Name: "multihop local preimage claim simple taproot",
+ TestFunc: testLocalPreimageClaimSimpleTaproot,
+ },
+ {
+ Name: "multihop local preimage claim simple taproot zero conf",
+ TestFunc: testLocalPreimageClaimSimpleTaprootZeroConf,
+ },
+ {
+ Name: "multihop local preimage claim leased",
+ TestFunc: testLocalPreimageClaimLeased,
+ },
+ {
+ Name: "multihop local preimage claim leased zero conf",
+ TestFunc: testLocalPreimageClaimLeasedZeroConf,
+ },
+ {
+ Name: "multihop htlc aggregation anchor",
+ TestFunc: testHtlcAggregaitonAnchor,
+ },
+ {
+ Name: "multihop htlc aggregation anchor zero conf",
+ TestFunc: testHtlcAggregaitonAnchorZeroConf,
+ },
+ {
+ Name: "multihop htlc aggregation simple taproot",
+ TestFunc: testHtlcAggregaitonSimpleTaproot,
+ },
+ {
+ Name: "multihop htlc aggregation simple taproot zero conf",
+ TestFunc: testHtlcAggregaitonSimpleTaprootZeroConf,
+ },
+ {
+ Name: "multihop htlc aggregation leased",
+ TestFunc: testHtlcAggregaitonLeased,
+ },
+ {
+ Name: "multihop htlc aggregation leased zero conf",
+ TestFunc: testHtlcAggregaitonLeasedZeroConf,
+ },
+}
+
+// testLocalClaimOutgoingHTLCAnchor tests `runLocalClaimOutgoingHTLC` with
+// anchor channel.
+func testLocalClaimOutgoingHTLCAnchor(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using anchor
+ // channels.
+ //
+ // Prepare params.
+ openChannelParams := lntest.OpenChannelParams{Amt: chanAmt}
+
+ cfg := node.CfgAnchor
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runLocalClaimOutgoingHTLC(ht, cfgs, openChannelParams)
+}
+
+// testLocalClaimOutgoingHTLCAnchorZeroConf tests `runLocalClaimOutgoingHTLC`
+// with zero conf anchor channel.
+func testLocalClaimOutgoingHTLCAnchorZeroConf(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // anchor channels.
+ //
+ // Prepare params.
+ openChannelParams := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: lnrpc.CommitmentType_ANCHORS,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and anchor.
+ cfg := node.CfgZeroConf
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runLocalClaimOutgoingHTLC(ht, cfgs, openChannelParams)
+}
+
+// testLocalClaimOutgoingHTLCSimpleTaproot tests `runLocalClaimOutgoingHTLC`
+// with simple taproot channel.
+func testLocalClaimOutgoingHTLCSimpleTaproot(ht *lntest.HarnessTest) {
+ c := lnrpc.CommitmentType_SIMPLE_TAPROOT
+
+ // Create a three hop network: Alice -> Bob -> Carol, using simple
+ // taproot channels.
+ //
+ // Prepare params.
+ openChannelParams := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ cfg := node.CfgSimpleTaproot
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runLocalClaimOutgoingHTLC(ht, cfgs, openChannelParams)
+}
+
+// testLocalClaimOutgoingHTLCSimpleTaprootZeroConf tests
+// `runLocalClaimOutgoingHTLC` with zero-conf simple taproot channel.
+func testLocalClaimOutgoingHTLCSimpleTaprootZeroConf(ht *lntest.HarnessTest) {
+ c := lnrpc.CommitmentType_SIMPLE_TAPROOT
+
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // simple taproot channels.
+ //
+ // Prepare params.
+ openChannelParams := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and leased
+ // channel.
+ cfg := node.CfgSimpleTaproot
+ cfg = append(cfg, node.CfgZeroConf...)
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runLocalClaimOutgoingHTLC(ht, cfgs, openChannelParams)
+}
+
+// testLocalClaimOutgoingHTLCLeased tests `runLocalClaimOutgoingHTLC` with
+// script enforced lease channel.
+func testLocalClaimOutgoingHTLCLeased(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using leased
+ // channels.
+ //
+ // Prepare params.
+ openChannelParams := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ CommitmentType: leasedType,
+ }
+
+ cfg := node.CfgLeased
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runLocalClaimOutgoingHTLC(ht, cfgs, openChannelParams)
+}
+
+// testLocalClaimOutgoingHTLCLeasedZeroConf tests `runLocalClaimOutgoingHTLC`
+// with zero-conf script enforced lease channel.
+func testLocalClaimOutgoingHTLCLeasedZeroConf(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // anchor channels.
+ //
+ // Prepare params.
+ openChannelParams := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: leasedType,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and leased
+ // channel.
+ cfg := node.CfgLeased
+ cfg = append(cfg, node.CfgZeroConf...)
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runLocalClaimOutgoingHTLC(ht, cfgs, openChannelParams)
+}
+
+// runLocalClaimOutgoingHTLC tests that in a multi-hop scenario, if the
+// outgoing HTLC is about to time out, then we'll go to chain in order to claim
+// it using the HTLC timeout transaction. Any dust HTLC's should be immediately
+// canceled backwards. Once the timeout has been reached, then we should sweep
+// it on-chain, and cancel the HTLC backwards.
+func runLocalClaimOutgoingHTLC(ht *lntest.HarnessTest,
+ cfgs [][]string, params lntest.OpenChannelParams) {
+
+ // Create a three hop network: Alice -> Bob -> Carol.
+ _, nodes := ht.CreateSimpleNetwork(cfgs, params)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
+
+ // For neutrino backend, we need to fund one more UTXO for Bob so he
+ // can sweep his outputs.
+ if ht.IsNeutrinoBackend() {
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, bob)
+ }
+
+ // Now that our channels are set up, we'll send two HTLC's from Alice
+ // to Carol. The first HTLC will be universally considered "dust",
+ // while the second will be a proper fully valued HTLC.
+ const dustHtlcAmt = btcutil.Amount(100)
+
+ // We'll create two random payment hashes unknown to carol, then send
+ // each of them by manually specifying the HTLC details.
+ carolPubKey := carol.PubKey[:]
+ dustPayHash := ht.Random32Bytes()
+ payHash := ht.Random32Bytes()
+
+ // If this is a taproot channel, then we'll need to make some manual
+ // route hints so Alice can actually find a route.
+ var routeHints []*lnrpc.RouteHint
+ if params.CommitmentType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
+ routeHints = makeRouteHints(bob, carol, params.ZeroConf)
+ }
+
+ req := &routerrpc.SendPaymentRequest{
+ Dest: carolPubKey,
+ Amt: int64(dustHtlcAmt),
+ PaymentHash: dustPayHash,
+ FinalCltvDelta: finalCltvDelta,
+ TimeoutSeconds: 60,
+ FeeLimitMsat: noFeeLimitMsat,
+ RouteHints: routeHints,
+ }
+ ht.SendPaymentAssertInflight(alice, req)
+
+ req = &routerrpc.SendPaymentRequest{
+ Dest: carolPubKey,
+ Amt: int64(htlcAmt),
+ PaymentHash: payHash,
+ FinalCltvDelta: finalCltvDelta,
+ TimeoutSeconds: 60,
+ FeeLimitMsat: noFeeLimitMsat,
+ RouteHints: routeHints,
+ }
+ ht.SendPaymentAssertInflight(alice, req)
+
+ // At this point, all 3 nodes should now have an active channel with
+ // the created HTLC pending on all of them.
+ //
+ // Alice should have two outgoing HTLCs on channel Alice -> Bob.
+ ht.AssertNumActiveHtlcs(alice, 2)
+
+ // Bob should have two incoming HTLCs on channel Alice -> Bob, and two
+ // outgoing HTLCs on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(bob, 4)
+
+ // Carol should have two incoming HTLCs on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(carol, 2)
+
+ // We'll now mine enough blocks to trigger Bob's force close the
+ // channel Bob=>Carol due to the fact that the HTLC is about to
+ // timeout. With the default outgoing broadcast delta of zero, this
+ // will be the same height as the htlc expiry height.
+ numBlocks := padCLTV(
+ uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
+ )
+ ht.MineBlocks(int(numBlocks))
+
+ // Bob's force close tx should have the following outputs,
+ // 1. anchor output.
+ // 2. to_local output, which is CSV locked.
+ // 3. outgoing HTLC output, which has expired.
+ //
+ // Bob's anchor output should be offered to his sweeper since Bob has
+ // time-sensitive HTLCs - we expect both anchors to be offered, while
+ // the sweeping of the remote anchor will be marked as failed due to
+ // `testmempoolaccept` check.
+ //
+ // For neutrino backend, there's no way to know the sweeping of the
+ // remote anchor is failed, so Bob still sees two pending sweeps.
+ if ht.IsNeutrinoBackend() {
+ ht.AssertNumPendingSweeps(bob, 2)
+ } else {
+ ht.AssertNumPendingSweeps(bob, 1)
+ }
+
+ // We expect to see tow txns in the mempool,
+ // 1. Bob's force close tx.
+ // 2. Bob's anchor sweep tx.
+ ht.AssertNumTxsInMempool(2)
+
+ // Mine a block to confirm the closing tx and the anchor sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ // At this point, Bob should have canceled backwards the dust HTLC that
+ // we sent earlier. This means Alice should now only have a single HTLC
+ // on her channel.
+ ht.AssertNumActiveHtlcs(alice, 1)
+
+ // With the closing transaction confirmed, we should expect Bob's HTLC
+ // timeout transaction to be offered to the sweeper due to the expiry
+ // being reached. we also expect Carol's anchor sweeps.
+ ht.AssertNumPendingSweeps(bob, 1)
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // Bob's sweeper should sweep his outgoing HTLC immediately since it's
+ // expired. His to_local output cannot be swept due to the CSV lock.
+ // Carol's anchor sweep should be failed due to output being dust.
+ ht.AssertNumTxsInMempool(1)
+
+ // Mine a block to confirm Bob's outgoing HTLC sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // With Bob's HTLC timeout transaction confirmed, there should be no
+ // active HTLC's on the commitment transaction from Alice -> Bob.
+ ht.AssertNumActiveHtlcs(alice, 0)
+
+ // At this point, Bob should show that the pending HTLC has advanced to
+ // the second stage and is ready to be swept once the timelock is up.
+ resp := ht.AssertNumPendingForceClose(bob, 1)[0]
+ require.NotZero(ht, resp.LimboBalance)
+ require.Positive(ht, resp.BlocksTilMaturity)
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+ require.Equal(ht, uint32(2), resp.PendingHtlcs[0].Stage)
+
+ ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+
+ "htlc=%v", resp.BlocksTilMaturity,
+ resp.PendingHtlcs[0].BlocksTilMaturity)
+
+ if params.CommitmentType == leasedType {
+ // Since Bob is the initiator of the script-enforced leased
+ // channel between him and Carol, he will incur an additional
+ // CLTV on top of the usual CSV delay on any outputs that he
+ // can sweep back to his wallet.
+ //
+ // We now mine enough blocks so the CLTV lock expires, which
+ // will trigger the sweep of the to_local and outgoing HTLC
+ // outputs.
+ ht.MineBlocks(int(resp.BlocksTilMaturity))
+
+ // Check that Bob has a pending sweeping tx which sweeps his
+ // to_local and outgoing HTLC outputs.
+ ht.AssertNumPendingSweeps(bob, 2)
+
+ // Mine a block to confirm the sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+ } else {
+ // Since Bob force closed the channel between him and Carol, he
+ // will incur the usual CSV delay on any outputs that he can
+ // sweep back to his wallet. We'll subtract one block from our
+ // current maturity period to assert on the mempool.
+ ht.MineBlocks(int(resp.BlocksTilMaturity - 1))
+
+ // Check that Bob has a pending sweeping tx which sweeps his
+ // to_local output.
+ ht.AssertNumPendingSweeps(bob, 1)
+
+ // Mine a block to confirm the to_local sweeping tx, which also
+ // triggers the sweeping of the second stage HTLC output.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // Bob's sweeper should now broadcast his second layer sweep
+ // due to the CSV on the HTLC timeout output.
+ ht.AssertNumTxsInMempool(1)
+
+ // Next, we'll mine a final block that should confirm the
+ // sweeping transactions left.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+ }
+
+ // Once this transaction has been confirmed, Bob should detect that he
+ // no longer has any pending channels.
+ ht.AssertNumPendingForceClose(bob, 0)
+}
+
+// testMultiHopReceiverPreimageClaimAnchor tests
+// `runMultiHopReceiverPreimageClaim` with anchor channels.
+func testMultiHopReceiverPreimageClaimAnchor(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using anchor
+ // channels.
+ //
+ // Prepare params.
+ openChannelParams := lntest.OpenChannelParams{Amt: chanAmt}
+
+ cfg := node.CfgAnchor
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runMultiHopReceiverPreimageClaim(ht, cfgs, openChannelParams)
+}
+
+// testMultiHopReceiverPreimageClaimAnchorZeroConf tests
+// `runMultiHopReceiverPreimageClaim` with zero-conf anchor channels.
+func testMultiHopReceiverPreimageClaimAnchorZeroConf(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // anchor channels.
+ //
+ // Prepare params.
+ openChannelParams := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: lnrpc.CommitmentType_ANCHORS,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and anchor.
+ cfg := node.CfgZeroConf
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runMultiHopReceiverPreimageClaim(ht, cfgs, openChannelParams)
+}
+
+// testMultiHopReceiverPreimageClaimSimpleTaproot tests
+// `runMultiHopReceiverPreimageClaim` with simple taproot channels.
+func testMultiHopReceiverPreimageClaimSimpleTaproot(ht *lntest.HarnessTest) {
+ c := lnrpc.CommitmentType_SIMPLE_TAPROOT
+
+ // Create a three hop network: Alice -> Bob -> Carol, using simple
+ // taproot channels.
+ //
+ // Prepare params.
+ openChannelParams := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ cfg := node.CfgSimpleTaproot
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runMultiHopReceiverPreimageClaim(ht, cfgs, openChannelParams)
+}
+
+// testMultiHopReceiverPreimageClaimSimpleTaproot tests
+// `runMultiHopReceiverPreimageClaim` with zero-conf simple taproot channels.
+func testMultiHopReceiverPreimageClaimSimpleTaprootZeroConf(
+ ht *lntest.HarnessTest) {
+
+ c := lnrpc.CommitmentType_SIMPLE_TAPROOT
+
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // simple taproot channels.
+ //
+ // Prepare params.
+ openChannelParams := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and leased
+ // channel.
+ cfg := node.CfgSimpleTaproot
+ cfg = append(cfg, node.CfgZeroConf...)
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runMultiHopReceiverPreimageClaim(ht, cfgs, openChannelParams)
+}
+
+// testMultiHopReceiverPreimageClaimLeased tests
+// `runMultiHopReceiverPreimageClaim` with script enforce lease channels.
+func testMultiHopReceiverPreimageClaimLeased(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using leased
+ // channels.
+ //
+ // Prepare params.
+ openChannelParams := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ CommitmentType: leasedType,
+ }
+
+ cfg := node.CfgLeased
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runMultiHopReceiverPreimageClaim(ht, cfgs, openChannelParams)
+}
+
+// testMultiHopReceiverPreimageClaimLeased tests
+// `runMultiHopReceiverPreimageClaim` with zero-conf script enforce lease
+// channels.
+func testMultiHopReceiverPreimageClaimLeasedZeroConf(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // anchor channels.
+ //
+ // Prepare params.
+ openChannelParams := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: leasedType,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and leased
+ // channel.
+ cfg := node.CfgLeased
+ cfg = append(cfg, node.CfgZeroConf...)
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runMultiHopReceiverPreimageClaim(ht, cfgs, openChannelParams)
+}
+
+// runMultiHopReceiverClaim tests that in the multi-hop setting, if the
+// receiver of an HTLC knows the preimage, but wasn't able to settle the HTLC
+// off-chain, then it goes on chain to claim the HTLC uing the HTLC success
+// transaction. In this scenario, the node that sent the outgoing HTLC should
+// extract the preimage from the sweep transaction, and finish settling the
+// HTLC backwards into the route.
+func runMultiHopReceiverPreimageClaim(ht *lntest.HarnessTest,
+ cfgs [][]string, params lntest.OpenChannelParams) {
+
+ // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor
+ // is never swept.
+ //
+ // TODO(yy): delete this line once the normal anchor sweeping is
+ // removed.
+ ht.SetMinRelayFeerate(10_000)
+
+ // Create a three hop network: Alice -> Bob -> Carol.
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
+ bobChanPoint := chanPoints[1]
+
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
+
+ // For neutrino backend, we need to one more UTXO for Carol so she can
+ // sweep her outputs.
+ if ht.IsNeutrinoBackend() {
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
+ }
+
+ // Fund Carol one UTXO so she can sweep outputs.
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
+
+ // If this is a taproot channel, then we'll need to make some manual
+ // route hints so Alice can actually find a route.
+ var routeHints []*lnrpc.RouteHint
+ if params.CommitmentType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
+ routeHints = makeRouteHints(bob, carol, params.ZeroConf)
+ }
+
+ // With the network active, we'll now add a new hodl invoice at Carol's
+ // end. Make sure the cltv expiry delta is large enough, otherwise Bob
+ // won't send out the outgoing htlc.
+ var preimage lntypes.Preimage
+ copy(preimage[:], ht.Random32Bytes())
+ payHash := preimage.Hash()
+
+ invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
+ Value: invoiceAmt,
+ CltvExpiry: finalCltvDelta,
+ Hash: payHash[:],
+ RouteHints: routeHints,
+ }
+ carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
+
+ // Subscribe the invoice.
+ stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
+
+ // Now that we've created the invoice, we'll send a single payment from
+ // Alice to Carol. We won't wait for the response however, as Carol
+ // will not immediately settle the payment.
+ req := &routerrpc.SendPaymentRequest{
+ PaymentRequest: carolInvoice.PaymentRequest,
+ TimeoutSeconds: 60,
+ FeeLimitMsat: noFeeLimitMsat,
+ }
+ ht.SendPaymentAssertInflight(alice, req)
+
+ // At this point, all 3 nodes should now have an active channel with
+ // the created HTLC pending on all of them.
+ // At this point, all 3 nodes should now have an active channel with
+ // the created HTLCs pending on all of them.
+ //
+ // Alice should have one outgoing HTLCs on channel Alice -> Bob.
+ ht.AssertNumActiveHtlcs(alice, 1)
+
+ // Bob should have one incoming HTLC on channel Alice -> Bob, and one
+ // outgoing HTLC on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(bob, 2)
+
+ // Carol should have one incoming HTLC on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(carol, 1)
+
+ // Wait for Carol to mark invoice as accepted. There is a small gap to
+ // bridge between adding the htlc to the channel and executing the exit
+ // hop logic.
+ ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
+
+ // Stop Bob so he won't be able to settle the incoming htlc.
+ restartBob := ht.SuspendNode(bob)
+
+ // Settle invoice. This will just mark the invoice as settled, as there
+ // is no link anymore to remove the htlc from the commitment tx. For
+ // this test, it is important to actually settle and not leave the
+ // invoice in the accepted state, because without a known preimage, the
+ // channel arbitrator won't go to chain.
+ carol.RPC.SettleInvoice(preimage[:])
+
+ // We now advance the block height to the point where Carol will force
+ // close her channel with Bob, broadcast the closing tx but keep it
+ // unconfirmed.
+ numBlocks := padCLTV(uint32(
+ invoiceReq.CltvExpiry - incomingBroadcastDelta,
+ ))
+
+ // Now we'll mine enough blocks to prompt Carol to actually go to the
+ // chain in order to sweep her HTLC since the value is high enough.
+ ht.MineBlocks(int(numBlocks))
+
+ // Carol's force close tx should have the following outputs,
+ // 1. anchor output.
+ // 2. to_local output, which is CSV locked.
+ // 3. incoming HTLC output, which she has the preimage to settle.
+ //
+ // Carol's anchor output should be offered to her sweeper since she has
+ // time-sensitive HTLCs - we expect both anchors to be offered, while
+ // the sweeping of the remote anchor will be marked as failed due to
+ // `testmempoolaccept` check.
+ //
+ // For neutrino backend, there's no way to know the sweeping of the
+ // remote anchor is failed, so Carol still sees two pending sweeps.
+ if ht.IsNeutrinoBackend() {
+ ht.AssertNumPendingSweeps(carol, 2)
+ } else {
+ ht.AssertNumPendingSweeps(carol, 1)
+ }
+
+ // We expect to see tow txns in the mempool,
+ // 1. Carol's force close tx.
+ // 2. Carol's anchor sweep tx.
+ ht.AssertNumTxsInMempool(2)
+
+ // Mine a block to confirm the closing tx and the anchor sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ ht.Log("Current height", ht.CurrentHeight())
+
+ // After the force close tx is mined, Carol should offer her second
+ // level HTLC tx to the sweeper.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // Restart bob again.
+ require.NoError(ht, restartBob())
+
+ // Once Bob is online, he should notice Carol's second level tx in the
+ // mempool, he will extract the preimage and settle the HTLC back
+ // off-chain. He will also try to sweep his anchor and to_local
+ // outputs, with the anchor output being skipped due to it being
+ // uneconomical.
+ if params.CommitmentType == leasedType {
+ // For leased channels, Bob cannot sweep his to_local output
+ // yet since it's timelocked, so we only see his anchor input.
+ ht.AssertNumPendingSweeps(bob, 1)
+ } else {
+ // For non-leased channels, Bob should have two pending sweeps,
+ // 1. to_local output.
+ // 2. anchor output, tho it won't be swept due to it being
+ // uneconomical.
+ ht.AssertNumPendingSweeps(bob, 2)
+ }
+
+ // Mine an empty block the for neutrino backend. We need this step to
+ // trigger Bob's chain watcher to detect the force close tx. Deep down,
+ // this happens because the notification system for neutrino is very
+ // different from others. Specifically, when a block contains the force
+ // close tx is notified, these two calls,
+ // - RegisterBlockEpochNtfn, will notify the block first.
+ // - RegisterSpendNtfn, will wait for the neutrino notifier to sync to
+ // the block, then perform a GetUtxo, which, by the time the spend
+ // details are sent, the blockbeat is considered processed in Bob's
+ // chain watcher.
+ //
+ // TODO(yy): refactor txNotifier to fix the above issue.
+ if ht.IsNeutrinoBackend() {
+ ht.MineEmptyBlocks(1)
+ }
+
+ if params.CommitmentType == leasedType {
+ // We expect to see 1 txns in the mempool,
+ // - Carol's second level HTLC sweep tx.
+ // We now mine a block to confirm it.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+ } else {
+ // We expect to see 2 txns in the mempool,
+ // - Bob's to_local sweep tx.
+ // - Carol's second level HTLC sweep tx.
+ // We now mine a block to confirm the sweeping txns.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+ }
+
+ // Once the second-level transaction confirmed, Bob should have
+ // extracted the preimage from the chain, and sent it back to Alice,
+ // clearing the HTLC off-chain.
+ ht.AssertNumActiveHtlcs(alice, 0)
+
+ // Check that the Alice's payment is correctly marked succeeded.
+ ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
+
+ // Carol's pending channel report should now show two outputs under
+ // limbo: her commitment output, as well as the second-layer claim
+ // output, and the pending HTLC should also now be in stage 2.
+ ht.AssertNumHTLCsAndStage(carol, bobChanPoint, 1, 2)
+
+ // If we mine 4 additional blocks, then Carol can sweep the second
+ // level HTLC output once the CSV expires.
+ ht.MineBlocks(defaultCSV - 1)
+
+ // Assert Carol has the pending HTLC sweep.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // We should have a new transaction in the mempool.
+ ht.AssertNumTxsInMempool(1)
+
+ // Finally, if we mine an additional block to confirm Carol's second
+ // level success transaction. Carol should not show a pending channel
+ // in her report afterwards.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+ ht.AssertNumPendingForceClose(carol, 0)
+
+ // The invoice should show as settled for Carol, indicating that it was
+ // swept on-chain.
+ ht.AssertInvoiceSettled(carol, carolInvoice.PaymentAddr)
+
+ // For leased channels, Bob still has his commit output to sweep to
+ // since he incurred an additional CLTV from being the channel
+ // initiator.
+ if params.CommitmentType == leasedType {
+ resp := ht.AssertNumPendingForceClose(bob, 1)[0]
+ require.Positive(ht, resp.LimboBalance)
+ require.Positive(ht, resp.BlocksTilMaturity)
+
+ // Mine enough blocks for Bob's commit output's CLTV to expire
+ // and sweep it.
+ ht.MineBlocks(int(resp.BlocksTilMaturity))
+
+ // Bob should have two pending inputs to be swept, the commit
+ // output and the anchor output.
+ ht.AssertNumPendingSweeps(bob, 2)
+
+ // Mine a block to confirm the commit output sweep.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+ }
+
+ // Assert Bob also sees the channel as closed.
+ ht.AssertNumPendingForceClose(bob, 0)
+}
+
+// testLocalForceCloseBeforeTimeoutAnchor tests
+// `runLocalForceCloseBeforeHtlcTimeout` with anchor channel.
+func testLocalForceCloseBeforeTimeoutAnchor(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using anchor
+ // channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{Amt: chanAmt}
+
+ cfg := node.CfgAnchor
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runLocalForceCloseBeforeHtlcTimeout(ht, cfgs, params)
+}
+
+// testLocalForceCloseBeforeTimeoutAnchorZeroConf tests
+// `runLocalForceCloseBeforeHtlcTimeout` with zero-conf anchor channel.
+func testLocalForceCloseBeforeTimeoutAnchorZeroConf(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // anchor channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: lnrpc.CommitmentType_ANCHORS,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and anchor.
+ cfg := node.CfgZeroConf
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runLocalForceCloseBeforeHtlcTimeout(ht, cfgs, params)
+}
+
+// testLocalForceCloseBeforeTimeoutSimpleTaproot tests
+// `runLocalForceCloseBeforeHtlcTimeout` with simple taproot channel.
+func testLocalForceCloseBeforeTimeoutSimpleTaproot(ht *lntest.HarnessTest) {
+ c := lnrpc.CommitmentType_SIMPLE_TAPROOT
+
+ // Create a three hop network: Alice -> Bob -> Carol, using simple
+ // taproot channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ cfg := node.CfgSimpleTaproot
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runLocalForceCloseBeforeHtlcTimeout(ht, cfgs, params)
+}
+
+// testLocalForceCloseBeforeTimeoutSimpleTaproot tests
+// `runLocalForceCloseBeforeHtlcTimeout` with zero-conf simple taproot channel.
+func testLocalForceCloseBeforeTimeoutSimpleTaprootZeroConf(
+ ht *lntest.HarnessTest) {
+
+ c := lnrpc.CommitmentType_SIMPLE_TAPROOT
+
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // simple taproot channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and leased channel.
+ cfg := node.CfgSimpleTaproot
+ cfg = append(cfg, node.CfgZeroConf...)
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runLocalForceCloseBeforeHtlcTimeout(ht, cfgs, params)
+}
+
+// testLocalForceCloseBeforeTimeoutLeased tests
+// `runLocalForceCloseBeforeHtlcTimeout` with script enforced lease channel.
+func testLocalForceCloseBeforeTimeoutLeased(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using leased
+ // channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ CommitmentType: leasedType,
+ }
+
+ cfg := node.CfgLeased
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runLocalForceCloseBeforeHtlcTimeout(ht, cfgs, params)
+}
+
+// testLocalForceCloseBeforeTimeoutLeased tests
+// `runLocalForceCloseBeforeHtlcTimeout` with zero-conf script enforced lease
+// channel.
+func testLocalForceCloseBeforeTimeoutLeasedZeroConf(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // anchor channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: leasedType,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and leased
+ // channel.
+ cfg := node.CfgLeased
+ cfg = append(cfg, node.CfgZeroConf...)
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runLocalForceCloseBeforeHtlcTimeout(ht, cfgs, params)
+}
+
+// runLocalForceCloseBeforeHtlcTimeout tests that in a multi-hop HTLC scenario,
+// if the node that extended the HTLC to the final node closes their commitment
+// on-chain early, then it eventually recognizes this HTLC as one that's timed
+// out. At this point, the node should timeout the HTLC using the HTLC timeout
+// transaction, then cancel it backwards as normal.
+func runLocalForceCloseBeforeHtlcTimeout(ht *lntest.HarnessTest,
+ cfgs [][]string, params lntest.OpenChannelParams) {
+
+ // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor
+ // is never swept.
+ //
+ // TODO(yy): delete this line once the normal anchor sweeping is
+ // removed.
+ ht.SetMinRelayFeerate(10_000)
+
+ // Create a three hop network: Alice -> Bob -> Carol.
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
+ bobChanPoint := chanPoints[1]
+
+ // With our channels set up, we'll then send a single HTLC from Alice
+ // to Carol. As Carol is in hodl mode, she won't settle this HTLC which
+ // opens up the base for out tests.
+
+ // If this is a taproot channel, then we'll need to make some manual
+ // route hints so Alice can actually find a route.
+ var routeHints []*lnrpc.RouteHint
+ if params.CommitmentType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
+ routeHints = makeRouteHints(bob, carol, params.ZeroConf)
+ }
+
+ // We'll now send a single HTLC across our multi-hop network.
+ carolPubKey := carol.PubKey[:]
+ payHash := ht.Random32Bytes()
+ req := &routerrpc.SendPaymentRequest{
+ Dest: carolPubKey,
+ Amt: int64(htlcAmt),
+ PaymentHash: payHash,
+ FinalCltvDelta: finalCltvDelta,
+ TimeoutSeconds: 60,
+ FeeLimitMsat: noFeeLimitMsat,
+ RouteHints: routeHints,
+ }
+ ht.SendPaymentAssertInflight(alice, req)
+
+ // At this point, all 3 nodes should now have an active channel with
+ // the created HTLC pending on all of them.
+ // At this point, all 3 nodes should now have an active channel with
+ // the created HTLCs pending on all of them.
+ //
+ // Alice should have one outgoing HTLC on channel Alice -> Bob.
+ ht.AssertNumActiveHtlcs(alice, 1)
+
+ // Bob should have one incoming HTLC on channel Alice -> Bob, and one
+ // outgoing HTLC on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(bob, 2)
+
+ // Carol should have one incoming HTLC on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(carol, 1)
+
+ // Now that all parties have the HTLC locked in, we'll immediately
+ // force close the Bob -> Carol channel. This should trigger contract
+ // resolution mode for both of them.
+ stream, _ := ht.CloseChannelAssertPending(bob, bobChanPoint, true)
+ ht.AssertStreamChannelForceClosed(bob, bobChanPoint, true, stream)
+
+ // Bob's force close tx should have the following outputs,
+ // 1. anchor output.
+ // 2. to_local output, which is CSV locked.
+ // 3. outgoing HTLC output, which hasn't expired yet.
+ //
+ // The channel close has anchors, we should expect to see both Bob and
+ // Carol has a pending sweep request for the anchor sweep.
+ ht.AssertNumPendingSweeps(carol, 1)
+ anchorSweep := ht.AssertNumPendingSweeps(bob, 1)[0]
+
+ // We expcet Bob's anchor sweep to be a non-CPFP anchor sweep now.
+ // Although he has time-sensitive outputs, which means initially his
+ // anchor output was used for CPFP, this anchor will be replaced by a
+ // new anchor sweeping request once his force close tx is confirmed in
+ // the above block. The timeline goes as follows:
+ // 1. At block 447, Bob force closes his channel with Carol, which
+ // caused the channel arbitartor to create a CPFP anchor sweep.
+ // 2. This force close tx was mined in AssertStreamChannelForceClosed,
+ // and we are now in block 448.
+ // 3. Since the blockbeat is processed via the chain [ChainArbitrator
+ // -> chainWatcher -> channelArbitrator -> Sweeper -> TxPublisher],
+ // when it reaches `chainWatcher`, Bob will detect the confirmed
+ // force close tx and notifies `channelArbitrator`. In response,
+ // `channelArbitrator` will advance to `StateContractClosed`, in
+ // which it will prepare an anchor resolution that's non-CPFP, send
+ // it to the sweeper to replace the CPFP anchor sweep.
+ // 4. By the time block 448 reaches `Sweeper`, the old CPFP anchor
+ // sweep has already been replaced with the new non-CPFP anchor
+ // sweep.
+ require.EqualValues(ht, 330, anchorSweep.Budget, "expected 330 sat "+
+ "budget, got %v", anchorSweep.Budget)
+
+ // Before the HTLC times out, we'll need to assert that Bob broadcasts
+ // a sweep tx for his commit output. Note that if the channel has a
+ // script-enforced lease, then Bob will have to wait for an additional
+ // CLTV before sweeping it.
+ if params.CommitmentType != leasedType {
+ // The sweeping tx is broadcast on the block CSV-1 so mine one
+ // block less than defaultCSV in order to perform mempool
+ // assertions.
+ ht.MineBlocks(int(defaultCSV - 1))
+
+ // Mine a block to confirm Bob's to_local sweep.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+ }
+
+ // We'll now mine enough blocks for the HTLC to expire. After this, Bob
+ // should hand off the now expired HTLC output to the sweeper.
+ resp := ht.AssertNumPendingForceClose(bob, 1)[0]
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+ ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+
+ "htlc=%v", resp.BlocksTilMaturity,
+ resp.PendingHtlcs[0].BlocksTilMaturity)
+
+ ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity))
+
+ // Bob's pending channel report should show that he has a single HTLC
+ // that's now in stage one.
+ ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 1)
+
+ // Bob should have two pending sweep requests,
+ // 1. the anchor sweep.
+ // 2. the outgoing HTLC sweep.
+ ht.AssertNumPendingSweeps(bob, 2)
+
+ // Bob's outgoing HTLC sweep should be broadcast now. Mine a block to
+ // confirm it.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // With the second layer timeout tx confirmed, Bob should have canceled
+ // backwards the HTLC that Carol sent.
+ ht.AssertNumActiveHtlcs(bob, 0)
+
+ // Additionally, Bob should now show that HTLC as being advanced to the
+ // second stage.
+ ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 2)
+
+ // Get the expiry height of the CSV-locked HTLC.
+ resp = ht.AssertNumPendingForceClose(bob, 1)[0]
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+ pendingHtlc := resp.PendingHtlcs[0]
+ require.Positive(ht, pendingHtlc.BlocksTilMaturity)
+
+ ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+
+ "htlc=%v", resp.BlocksTilMaturity,
+ resp.PendingHtlcs[0].BlocksTilMaturity)
+
+ // Mine enough blocks for the HTLC to expire.
+ ht.MineBlocks(int(pendingHtlc.BlocksTilMaturity))
+
+ // Based on this is a leased channel or not, Bob may still need to
+ // sweep his to_local output.
+ if params.CommitmentType == leasedType {
+ // Bob should have three pending sweep requests,
+ // 1. the anchor sweep.
+ // 2. the second-level HTLC sweep.
+ // 3. the to_local output sweep, which is CSV+CLTV locked, is
+ // now mature.
+ //
+ // The test is setup such that the to_local and the
+ // second-level HTLC sweeps share the same deadline, which
+ // means they will be swept in the same tx.
+ ht.AssertNumPendingSweeps(bob, 3)
+ } else {
+ // Bob should have two pending sweeps,
+ // 1. the anchor sweep.
+ // 2. the second-level HTLC sweep.
+ ht.AssertNumPendingSweeps(bob, 2)
+ }
+
+ // Now that the CSV timelock has expired, mine a block to confirm the
+ // sweep.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // At this point, Bob should no longer show any channels as pending
+ // close.
+ ht.AssertNumPendingForceClose(bob, 0)
+}
+
+// testRemoteForceCloseBeforeTimeoutAnchor tests
+// `runRemoteForceCloseBeforeHtlcTimeout` with anchor channel.
+func testRemoteForceCloseBeforeTimeoutAnchor(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using anchor
+ // channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{Amt: chanAmt}
+
+ cfg := node.CfgAnchor
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runRemoteForceCloseBeforeHtlcTimeout(ht, cfgs, params)
+}
+
+// testRemoteForceCloseBeforeTimeoutAnchor tests
+// `runRemoteForceCloseBeforeHtlcTimeout` with zero-conf anchor channel.
+func testRemoteForceCloseBeforeTimeoutAnchorZeroConf(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // anchor channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: lnrpc.CommitmentType_ANCHORS,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and anchor.
+ cfg := node.CfgZeroConf
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runRemoteForceCloseBeforeHtlcTimeout(ht, cfgs, params)
+}
+
+// testRemoteForceCloseBeforeTimeoutSimpleTaproot tests
+// `runLocalForceCloseBeforeHtlcTimeout` with zero-conf simple taproot channel.
+func testRemoteForceCloseBeforeTimeoutSimpleTaprootZeroConf(
+ ht *lntest.HarnessTest) {
+
+ c := lnrpc.CommitmentType_SIMPLE_TAPROOT
+
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // simple taproot channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and leased
+ // channel.
+ cfg := node.CfgSimpleTaproot
+ cfg = append(cfg, node.CfgZeroConf...)
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runRemoteForceCloseBeforeHtlcTimeout(ht, cfgs, params)
+}
+
+// testRemoteForceCloseBeforeTimeoutSimpleTaproot tests
+// `runLocalForceCloseBeforeHtlcTimeout` with simple taproot channel.
+func testRemoteForceCloseBeforeTimeoutSimpleTaproot(ht *lntest.HarnessTest) {
+ c := lnrpc.CommitmentType_SIMPLE_TAPROOT
+
+ // Create a three hop network: Alice -> Bob -> Carol, using simple
+ // taproot channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ cfg := node.CfgSimpleTaproot
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runRemoteForceCloseBeforeHtlcTimeout(ht, cfgs, params)
+}
+
+// testRemoteForceCloseBeforeTimeoutLeasedZeroConf tests
+// `runRemoteForceCloseBeforeHtlcTimeout` with zero-conf script enforced lease
+// channel.
+func testRemoteForceCloseBeforeTimeoutLeasedZeroConf(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // anchor channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: leasedType,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and leased
+ // channel.
+ cfg := node.CfgLeased
+ cfg = append(cfg, node.CfgZeroConf...)
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runRemoteForceCloseBeforeHtlcTimeout(ht, cfgs, params)
+}
+
+// testRemoteForceCloseBeforeTimeoutLeased tests
+// `runRemoteForceCloseBeforeHtlcTimeout` with script enforced lease channel.
+func testRemoteForceCloseBeforeTimeoutLeased(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using leased
+ // channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ CommitmentType: leasedType,
+ }
+
+ cfg := node.CfgLeased
+ cfgCarol := append(cfg, "--hodl.exit-settle")
+ cfgs := [][]string{cfg, cfg, cfgCarol}
+
+ runRemoteForceCloseBeforeHtlcTimeout(ht, cfgs, params)
+}
+
+// runRemoteForceCloseBeforeHtlcTimeout tests that if we extend a multi-hop
+// HTLC, and the final destination of the HTLC force closes the channel, then
+// we properly timeout the HTLC directly on *their* commitment transaction once
+// the timeout has expired. Once we sweep the transaction, we should also
+// cancel back the initial HTLC.
+func runRemoteForceCloseBeforeHtlcTimeout(ht *lntest.HarnessTest,
+ cfgs [][]string, params lntest.OpenChannelParams) {
+
+ // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor
+ // is never swept.
+ //
+ // TODO(yy): delete this line once the normal anchor sweeping is
+ // removed.
+ ht.SetMinRelayFeerate(10_000)
+
+ // Create a three hop network: Alice -> Bob -> Carol.
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
+ bobChanPoint := chanPoints[1]
+
+ // If this is a taproot channel, then we'll need to make some manual
+ // route hints so Alice can actually find a route.
+ var routeHints []*lnrpc.RouteHint
+ if params.CommitmentType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
+ routeHints = makeRouteHints(bob, carol, params.ZeroConf)
+ }
+
+ // With our channels set up, we'll then send a single HTLC from Alice
+ // to Carol. As Carol is in hodl mode, she won't settle this HTLC which
+ // opens up the base for out tests.
+ var preimage lntypes.Preimage
+ copy(preimage[:], ht.Random32Bytes())
+ payHash := preimage.Hash()
+ invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
+ Value: int64(htlcAmt),
+ CltvExpiry: finalCltvDelta,
+ Hash: payHash[:],
+ RouteHints: routeHints,
+ }
+ carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
+
+ // Subscribe the invoice.
+ stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
+
+ req := &routerrpc.SendPaymentRequest{
+ PaymentRequest: carolInvoice.PaymentRequest,
+ TimeoutSeconds: 60,
+ FeeLimitMsat: noFeeLimitMsat,
+ }
+ ht.SendPaymentAssertInflight(alice, req)
+
+ // At this point, all 3 nodes should now have an active channel with
+ // the created HTLCs pending on all of them.
+ //
+ // Alice should have one outgoing HTLC on channel Alice -> Bob.
+ ht.AssertNumActiveHtlcs(alice, 1)
+
+ // Bob should have one incoming HTLC on channel Alice -> Bob, and one
+ // outgoing HTLC on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(bob, 2)
+
+ // Carol should have one incoming HTLC on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(carol, 1)
+
+ // At this point, we'll now instruct Carol to force close the tx. This
+ // will let us exercise that Bob is able to sweep the expired HTLC on
+ // Carol's version of the commitment tx.
+ closeStream, _ := ht.CloseChannelAssertPending(
+ carol, bobChanPoint, true,
+ )
+
+ // For anchor channels, the anchor won't be used for CPFP because
+ // channel arbitrator thinks Carol doesn't have preimage for her
+ // incoming HTLC on the commitment transaction Bob->Carol. Although
+ // Carol created this invoice, because it's a hold invoice, the
+ // preimage won't be generated automatically.
+ ht.AssertStreamChannelForceClosed(
+ carol, bobChanPoint, true, closeStream,
+ )
+
+ // At this point, Bob should have a pending force close channel as
+ // Carol has gone directly to chain.
+ ht.AssertNumPendingForceClose(bob, 1)
+
+ // Carol will offer her anchor to her sweeper.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // Bob should offered the anchor output to his sweeper.
+ if params.CommitmentType == leasedType {
+ // For script enforced lease channels, Bob can sweep his anchor
+ // output immediately although it will be skipped due to it
+ // being uneconomical. His to_local output is CLTV locked so it
+ // cannot be swept yet.
+ ht.AssertNumPendingSweeps(bob, 1)
+ } else {
+ // For non-leased channels, Bob can sweep his commit and anchor
+ // outputs immediately.
+ ht.AssertNumPendingSweeps(bob, 2)
+
+ // We expect to see only one sweeping tx to be published from
+ // Bob, which sweeps his to_local output. His anchor output
+ // won't be swept due it being uneconomical. For Carol, since
+ // her anchor is not used for CPFP, it'd be also uneconomical
+ // to sweep so it will fail.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+ }
+
+ // Next, we'll mine enough blocks for the HTLC to expire. At this
+ // point, Bob should hand off the output to his sweeper, which will
+ // broadcast a sweep transaction.
+ resp := ht.AssertNumPendingForceClose(bob, 1)[0]
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+ ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+
+ "htlc=%v", resp.BlocksTilMaturity,
+ resp.PendingHtlcs[0].BlocksTilMaturity)
+
+ ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity))
+
+ // If we check Bob's pending channel report, it should show that he has
+ // a single HTLC that's now in the second stage, as it skipped the
+ // initial first stage since this is a direct HTLC.
+ ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 2)
+
+ // Bob should have two pending sweep requests,
+ // 1. the uneconomical anchor sweep.
+ // 2. the direct timeout sweep.
+ ht.AssertNumPendingSweeps(bob, 2)
+
+ // Bob's sweeping tx should now be found in the mempool.
+ sweepTx := ht.AssertNumTxsInMempool(1)[0]
+
+ // If we mine an additional block, then this should confirm Bob's tx
+ // which sweeps the direct HTLC output.
+ block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
+ ht.AssertTxInBlock(block, sweepTx)
+
+ // Now that the sweeping tx has been confirmed, Bob should cancel back
+ // that HTLC. As a result, Alice should not know of any active HTLC's.
+ ht.AssertNumActiveHtlcs(alice, 0)
+
+ // For script enforced lease channels, Bob still need to wait for the
+ // CLTV lock to expire before he can sweep his to_local output.
+ if params.CommitmentType == leasedType {
+ // Get the remaining blocks to mine.
+ resp = ht.AssertNumPendingForceClose(bob, 1)[0]
+ ht.MineBlocks(int(resp.BlocksTilMaturity))
+
+ // Assert the commit output has been offered to the sweeper.
+ // Bob should have two pending sweep requests - one for the
+ // commit output and one for the anchor output.
+ ht.AssertNumPendingSweeps(bob, 2)
+
+ // Mine the to_local sweep tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+ }
+
+ // Now we'll check Bob's pending channel report. Since this was Carol's
+ // commitment, he doesn't have to wait for any CSV delays, but he may
+ // still need to wait for a CLTV on his commit output to expire
+ // depending on the commitment type.
+ ht.AssertNumPendingForceClose(bob, 0)
+
+ // While we're here, we assert that our expired invoice's state is
+ // correctly updated, and can no longer be settled.
+ ht.AssertInvoiceState(stream, lnrpc.Invoice_CANCELED)
+}
+
+// testLocalClaimIncomingHTLCAnchorZeroConf tests `runLocalClaimIncomingHTLC`
+// with zero-conf anchor channel.
+func testLocalClaimIncomingHTLCAnchorZeroConf(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // anchor channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: lnrpc.CommitmentType_ANCHORS,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and anchor.
+ cfg := node.CfgZeroConf
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runLocalClaimIncomingHTLC(ht, cfgs, params)
+}
+
+// testLocalClaimIncomingHTLCAnchor tests `runLocalClaimIncomingHTLC` with
+// anchor channel.
+func testLocalClaimIncomingHTLCAnchor(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using anchor
+ // channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{Amt: chanAmt}
+
+ cfg := node.CfgAnchor
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runLocalClaimIncomingHTLC(ht, cfgs, params)
+}
+
+// testLocalClaimIncomingHTLCSimpleTaprootZeroConf tests
+// `runLocalClaimIncomingHTLC` with zero-conf simple taproot channel.
+func testLocalClaimIncomingHTLCSimpleTaprootZeroConf(ht *lntest.HarnessTest) {
+ c := lnrpc.CommitmentType_SIMPLE_TAPROOT
+
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // simple taproot channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and leased channel.
+ cfg := node.CfgSimpleTaproot
+ cfg = append(cfg, node.CfgZeroConf...)
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runLocalClaimIncomingHTLC(ht, cfgs, params)
+}
+
+// testLocalClaimIncomingHTLCSimpleTaproot tests `runLocalClaimIncomingHTLC`
+// with simple taproot channel.
+func testLocalClaimIncomingHTLCSimpleTaproot(ht *lntest.HarnessTest) {
+ c := lnrpc.CommitmentType_SIMPLE_TAPROOT
+
+ // Create a three hop network: Alice -> Bob -> Carol, using simple
+ // taproot channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ cfg := node.CfgSimpleTaproot
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runLocalClaimIncomingHTLC(ht, cfgs, params)
+}
+
+// runLocalClaimIncomingHTLC tests that in a multi-hop HTLC scenario, if we
+// force close a channel with an incoming HTLC, and later find out the preimage
+// via the witness beacon, we properly settle the HTLC on-chain using the HTLC
+// success transaction in order to ensure we don't lose any funds.
+func runLocalClaimIncomingHTLC(ht *lntest.HarnessTest,
+ cfgs [][]string, params lntest.OpenChannelParams) {
+
+ // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor
+ // is never swept.
+ //
+ // TODO(yy): delete this line once the normal anchor sweeping is
+ // removed.
+ ht.SetMinRelayFeerate(10_000)
+
+ // Create a three hop network: Alice -> Bob -> Carol.
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
+ aliceChanPoint := chanPoints[0]
+
+ // Fund Carol one UTXO so she can sweep outputs.
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
+
+ // If this is a taproot channel, then we'll need to make some manual
+ // route hints so Alice can actually find a route.
+ var routeHints []*lnrpc.RouteHint
+ if params.CommitmentType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
+ routeHints = makeRouteHints(bob, carol, params.ZeroConf)
+ }
+
+ // With the network active, we'll now add a new hodl invoice at Carol's
+ // end. Make sure the cltv expiry delta is large enough, otherwise Bob
+ // won't send out the outgoing htlc.
+ preimage := ht.RandomPreimage()
+ payHash := preimage.Hash()
+
+ invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
+ Value: invoiceAmt,
+ CltvExpiry: finalCltvDelta,
+ Hash: payHash[:],
+ RouteHints: routeHints,
+ }
+ carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
+
+ // Subscribe the invoice.
+ stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
+
+ // Now that we've created the invoice, we'll send a single payment from
+ // Alice to Carol. We won't wait for the response however, as Carol
+ // will not immediately settle the payment.
+ req := &routerrpc.SendPaymentRequest{
+ PaymentRequest: carolInvoice.PaymentRequest,
+ TimeoutSeconds: 60,
+ FeeLimitMsat: noFeeLimitMsat,
+ }
+ ht.SendPaymentAssertInflight(alice, req)
+
+ // At this point, all 3 nodes should now have an active channel with
+ // the created HTLC pending on all of them.
+ // At this point, all 3 nodes should now have an active channel with
+ // the created HTLCs pending on all of them.
+ //
+ // Alice should have one outgoing HTLC on channel Alice -> Bob.
+ ht.AssertNumActiveHtlcs(alice, 1)
+
+ // Bob should have one incoming HTLC on channel Alice -> Bob, and one
+ // outgoing HTLC on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(bob, 2)
+
+ // Carol should have one incoming HTLC on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(carol, 1)
+
+ // Wait for carol to mark invoice as accepted. There is a small gap to
+ // bridge between adding the htlc to the channel and executing the exit
+ // hop logic.
+ ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
+
+ // At this point, Bob decides that he wants to exit the channel
+ // Alice=>Bob immediately, so he force closes his commitment tx.
+ closeStream, _ := ht.CloseChannelAssertPending(
+ bob, aliceChanPoint, true,
+ )
+
+ // For anchor channels, the anchor won't be used for CPFP as there's no
+ // deadline pressure for Bob on the channel Alice->Bob at the moment.
+ // For Bob's local commitment tx, there's only one incoming HTLC which
+ // he doesn't have the preimage yet.
+ hasAnchorSweep := false
+ bobForceClose := ht.AssertStreamChannelForceClosed(
+ bob, aliceChanPoint, hasAnchorSweep, closeStream,
+ )
+
+ // Alice will offer her to_local and anchor outputs to her sweeper.
+ ht.AssertNumPendingSweeps(alice, 2)
+
+ // Bob will offer his anchor to his sweeper.
+ ht.AssertNumPendingSweeps(bob, 1)
+
+ // Assert the expected num of txns are found in the mempool.
+ //
+ // We expect to see only one sweeping tx to be published from Alice,
+ // which sweeps her to_local output (which is to to_remote on Bob's
+ // commit tx). Her anchor output won't be swept as it's uneconomical.
+ // For Bob, since his anchor is not used for CPFP, it'd be uneconomical
+ // to sweep so it will fail.
+ ht.AssertNumTxsInMempool(1)
+
+ // Mine a block to confirm Alice's sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // Suspend Bob to force Carol to go to chain.
+ restartBob := ht.SuspendNode(bob)
+
+ // Settle invoice. This will just mark the invoice as settled, as there
+ // is no link anymore to remove the htlc from the commitment tx. For
+ // this test, it is important to actually settle and not leave the
+ // invoice in the accepted state, because without a known preimage, the
+ // channel arbitrator won't go to chain.
+ carol.RPC.SettleInvoice(preimage[:])
+
+ // We now advance the block height to the point where Carol will force
+ // close her channel with Bob, broadcast the closing tx but keep it
+ // unconfirmed.
+ numBlocks := padCLTV(
+ uint32(invoiceReq.CltvExpiry - incomingBroadcastDelta),
+ )
+
+ // We've already mined 2 blocks at this point, so we only need to mine
+ // CLTV-2 blocks.
+ ht.MineBlocks(int(numBlocks - 2))
+
+ // Expect two txns in the mempool,
+ // - Carol's force close tx.
+ // - Carol's CPFP anchor sweeping tx.
+ // Mine a block to confirm them.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ // After the force close tx is mined, Carol should offer her
+ // second-level success HTLC tx to her sweeper.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // Restart bob again.
+ require.NoError(ht, restartBob())
+
+ // Once Bob is online and sees the force close tx Bob=>Carol, he will
+ // create a tx to sweep his commitment output. His anchor outputs will
+ // not be swept due to uneconomical. We expect to see three sweeping
+ // requests,
+ // - the commitment output.
+ // - the anchor output from channel Alice=>Bob.
+ // - the anchor output from channel Bob=>Carol.
+ ht.AssertNumPendingSweeps(bob, 3)
+
+ // Mine an empty block the for neutrino backend. We need this step to
+ // trigger Bob's chain watcher to detect the force close tx. Deep down,
+ // this happens because the notification system for neutrino is very
+ // different from others. Specifically, when a block contains the force
+ // close tx is notified, these two calls,
+ // - RegisterBlockEpochNtfn, will notify the block first.
+ // - RegisterSpendNtfn, will wait for the neutrino notifier to sync to
+ // the block, then perform a GetUtxo, which, by the time the spend
+ // details are sent, the blockbeat is considered processed in Bob's
+ // chain watcher.
+ //
+ // TODO(yy): refactor txNotifier to fix the above issue.
+ if ht.IsNeutrinoBackend() {
+ ht.MineEmptyBlocks(1)
+ }
+
+ // Assert txns can be found in the mempool.
+ //
+ // Carol will broadcast her sweeping tx and Bob will sweep his
+ // commitment anchor output, we'd expect to see two txns,
+ // - Carol's second level HTLC tx.
+ // - Bob's commitment output sweeping tx.
+ ht.AssertNumTxsInMempool(2)
+
+ // At this point we suspend Alice to make sure she'll handle the
+ // on-chain settle after a restart.
+ restartAlice := ht.SuspendNode(alice)
+
+ // Mine a block to confirm the sweeping txns made by Bob and Carol.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ // When Bob notices Carol's second level tx in the block, he will
+ // extract the preimage and broadcast a second level tx to claim the
+ // HTLC in his (already closed) channel with Alice, which means Bob has
+ // three sweeping requests,
+ // - the second level HTLC tx from channel Alice=>Bob.
+ // - the anchor output from channel Alice=>Bob.
+ // - the anchor output from channel Bob=>Carol.
+ ht.AssertNumPendingSweeps(bob, 3)
+
+ // Mine a block to trigger the sweep. This is needed because the
+ // preimage extraction logic from the link is not managed by the
+ // blockbeat, which means the preimage may be sent to the contest
+ // resolver after it's launched.
+ //
+ // TODO(yy): Expose blockbeat to the link layer.
+ ht.MineEmptyBlocks(1)
+
+ // At this point, Bob should have broadcast his second layer success
+ // tx, and should have sent it to his sweeper.
+ //
+ // Check Bob's second level tx.
+ bobSecondLvlTx := ht.GetNumTxsFromMempool(1)[0]
+
+ // It should spend from the commitment in the channel with Alice.
+ ht.AssertTxSpendFrom(bobSecondLvlTx, bobForceClose)
+
+ // We'll now mine a block which should confirm Bob's second layer tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // Bob should consider the channel Bob=>Carol closed, and channel
+ // Alice=>Bob pending close.
+ ht.AssertNumPendingForceClose(bob, 1)
+
+ // Now that the preimage from Bob has hit the chain, restart Alice to
+ // ensure she'll pick it up.
+ require.NoError(ht, restartAlice())
+
+ // If we then mine 1 additional block, Carol's second level tx should
+ // mature, and she can pull the funds from it with a sweep tx.
+ resp := ht.AssertNumPendingForceClose(carol, 1)[0]
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+ ht.Logf("Carol's timelock to_local output=%v, timelock on second stage "+
+ "htlc=%v", resp.BlocksTilMaturity,
+ resp.PendingHtlcs[0].BlocksTilMaturity)
+
+ ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity))
+
+ // Carol should have one a sweep request for her second level tx.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // Carol's sweep tx should be broadcast, assert it's in the mempool and
+ // mine it.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // We now mine blocks till the CSV lock on Bob's success HTLC on
+ // commitment Alice=>Bob expires.
+ resp = ht.AssertNumPendingForceClose(bob, 1)[0]
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+ ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+
+ "htlc=%v", resp.BlocksTilMaturity,
+ resp.PendingHtlcs[0].BlocksTilMaturity)
+
+ ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity))
+
+ // Bob should have three requests in his sweeper.
+ // - the second level HTLC tx.
+ // - the anchor output from channel Alice=>Bob.
+ // - the anchor output from channel Bob=>Carol.
+ ht.AssertNumPendingSweeps(bob, 3)
+
+ // When we mine one additional block, that will confirm Bob's sweep.
+ // Now Bob should have no pending channels anymore, as this just
+ // resolved it by the confirmation of the sweep transaction.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // All nodes should show zero pending and open channels.
+ for _, node := range []*node.HarnessNode{alice, bob, carol} {
+ ht.AssertNumPendingForceClose(node, 0)
+ ht.AssertNodeNumChannels(node, 0)
+ }
+
+ // Finally, check that the Alice's payment is correctly marked
+ // succeeded.
+ ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
+}
+
+// testLocalClaimIncomingHTLCLeasedZeroConf tests
+// `runLocalClaimIncomingHTLCLeased` with zero-conf script enforced lease
+// channel.
+func testLocalClaimIncomingHTLCLeasedZeroConf(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // anchor channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: leasedType,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and leased channel.
+ cfg := node.CfgLeased
+ cfg = append(cfg, node.CfgZeroConf...)
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runLocalClaimIncomingHTLCLeased(ht, cfgs, params)
+}
+
+// testLocalClaimIncomingHTLCLeased tests `runLocalClaimIncomingHTLCLeased`
+// with script enforced lease channel.
+func testLocalClaimIncomingHTLCLeased(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using leased
+ // channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ CommitmentType: leasedType,
+ }
+
+ cfg := node.CfgLeased
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runLocalClaimIncomingHTLCLeased(ht, cfgs, params)
+}
+
+// runLocalClaimIncomingHTLCLeased tests that in a multi-hop HTLC scenario, if
+// we force close a channel with an incoming HTLC, and later find out the
+// preimage via the witness beacon, we properly settle the HTLC on-chain using
+// the HTLC success transaction in order to ensure we don't lose any funds.
+//
+// TODO(yy): simplify or remove this test as it's too complicated.
+func runLocalClaimIncomingHTLCLeased(ht *lntest.HarnessTest,
+ cfgs [][]string, params lntest.OpenChannelParams) {
+
+ // Set the min relay feerate to be 5 sat/vbyte so the non-CPFP anchor
+ // is never swept.
+ //
+ // TODO(yy): delete this line once the normal anchor sweeping is
+ // removed.
+ ht.SetMinRelayFeerate(5000)
+
+ // Create a three hop network: Alice -> Bob -> Carol.
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
+ aliceChanPoint, bobChanPoint := chanPoints[0], chanPoints[1]
+
+ // Fund Carol one UTXO so she can sweep outputs.
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
+
+ // With the network active, we'll now add a new hodl invoice at Carol's
+ // end. Make sure the cltv expiry delta is large enough, otherwise Bob
+ // won't send out the outgoing htlc.
+ preimage := ht.RandomPreimage()
+ payHash := preimage.Hash()
+
+ invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
+ Value: invoiceAmt,
+ CltvExpiry: finalCltvDelta,
+ Hash: payHash[:],
+ }
+ carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
+
+ // Subscribe the invoice.
+ stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
+
+ // Now that we've created the invoice, we'll send a single payment from
+ // Alice to Carol. We won't wait for the response however, as Carol
+ // will not immediately settle the payment.
+ req := &routerrpc.SendPaymentRequest{
+ PaymentRequest: carolInvoice.PaymentRequest,
+ TimeoutSeconds: 60,
+ FeeLimitMsat: noFeeLimitMsat,
+ }
+ ht.SendPaymentAssertInflight(alice, req)
+
+ // At this point, all 3 nodes should now have an active channel with
+ // the created HTLC pending on all of them.
+ // At this point, all 3 nodes should now have an active channel with
+ // the created HTLCs pending on all of them.
+ //
+ // Alice should have one outgoing HTLC on channel Alice -> Bob.
+ ht.AssertNumActiveHtlcs(alice, 1)
+
+ // Bob should have one incoming HTLC on channel Alice -> Bob, and one
+ // outgoing HTLC on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(bob, 2)
+
+ // Carol should have one incoming HTLC on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(carol, 1)
+
+ // Wait for carol to mark invoice as accepted. There is a small gap to
+ // bridge between adding the htlc to the channel and executing the exit
+ // hop logic.
+ ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
+
+ // At this point, Bob decides that he wants to exit the channel
+ // Alice=>Bob immediately, so he force closes his commitment tx.
+ closeStream, _ := ht.CloseChannelAssertPending(
+ bob, aliceChanPoint, true,
+ )
+
+ // For anchor channels, the anchor won't be used for CPFP as there's no
+ // deadline pressure for Bob on the channel Alice->Bob at the moment.
+ // For Bob's local commitment tx, there's only one incoming HTLC which
+ // he doesn't have the preimage yet.
+ hasAnchorSweep := false
+ bobForceClose := ht.AssertStreamChannelForceClosed(
+ bob, aliceChanPoint, hasAnchorSweep, closeStream,
+ )
+
+ // Alice will offer her anchor output to her sweeper. Her commitment
+ // output cannot be swept yet as it has incurred an additional CLTV due
+ // to being the initiator of a script-enforced leased channel.
+ //
+ // This anchor output cannot be swept due to it being uneconomical.
+ ht.AssertNumPendingSweeps(alice, 1)
+
+ // Bob will offer his anchor to his sweeper.
+ //
+ // This anchor output cannot be swept due to it being uneconomical.
+ ht.AssertNumPendingSweeps(bob, 1)
+
+ // Suspend Bob to force Carol to go to chain.
+ restartBob := ht.SuspendNode(bob)
+
+ // Settle invoice. This will just mark the invoice as settled, as there
+ // is no link anymore to remove the htlc from the commitment tx. For
+ // this test, it is important to actually settle and not leave the
+ // invoice in the accepted state, because without a known preimage, the
+ // channel arbitrator won't go to chain.
+ carol.RPC.SettleInvoice(preimage[:])
+
+ // We now advance the block height to the point where Carol will force
+ // close her channel with Bob, broadcast the closing tx but keep it
+ // unconfirmed.
+ numBlocks := padCLTV(
+ uint32(invoiceReq.CltvExpiry - incomingBroadcastDelta),
+ )
+ ht.MineBlocks(int(numBlocks) - 1)
+
+ // Expect two txns in the mempool,
+ // - Carol's force close tx.
+ // - Carol's CPFP anchor sweeping tx.
+ // Mine a block to confirm them.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ // After the force close tx is mined, Carol should offer her
+ // second-level success HTLC tx to her sweeper.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // Restart bob again.
+ require.NoError(ht, restartBob())
+
+ // Once Bob is online and sees the force close tx Bob=>Carol, he will
+ // offer his commitment output to his sweeper, which will be skipped
+ // due to it being timelocked. His anchor outputs will not be swept due
+ // to uneconomical. We expect to see two sweeping requests,
+ // - the anchor output from channel Alice=>Bob.
+ // - the anchor output from channel Bob=>Carol.
+ ht.AssertNumPendingSweeps(bob, 2)
+
+ // Assert txns can be found in the mempool.
+ //
+ // Carol will broadcast her second-level HTLC sweeping txns. Bob canoot
+ // sweep his commitment anchor output yet due to it being CLTV locked.
+ ht.AssertNumTxsInMempool(1)
+
+ // At this point we suspend Alice to make sure she'll handle the
+ // on-chain settle after a restart.
+ restartAlice := ht.SuspendNode(alice)
+
+ // Mine a block to confirm the sweeping tx from Carol.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // When Bob notices Carol's second level tx in the block, he will
+ // extract the preimage and broadcast a second level tx to claim the
+ // HTLC in his (already closed) channel with Alice, which means Bob has
+ // three sweeping requests,
+ // - the second level HTLC tx from channel Alice=>Bob.
+ // - the anchor output from channel Alice=>Bob.
+ // - the anchor output from channel Bob=>Carol.
+ ht.AssertNumPendingSweeps(bob, 3)
+
+ // Mine a block to trigger the sweep. This is needed because the
+ // preimage extraction logic from the link is not managed by the
+ // blockbeat, which means the preimage may be sent to the contest
+ // resolver after it's launched.
+ //
+ // TODO(yy): Expose blockbeat to the link layer.
+ ht.MineEmptyBlocks(1)
+
+ // At this point, Bob should have broadcast his second layer success
+ // tx, and should have sent it to his sweeper.
+ //
+ // Check Bob's second level tx.
+ bobSecondLvlTx := ht.GetNumTxsFromMempool(1)[0]
+
+ // It should spend from the commitment in the channel with Alice.
+ ht.AssertTxSpendFrom(bobSecondLvlTx, bobForceClose)
+
+ // The channel between Bob and Carol will still be pending force close
+ // if this is a leased channel. We'd also check the HTLC stages are
+ // correct in both channels.
+ ht.AssertNumPendingForceClose(bob, 2)
+ ht.AssertNumHTLCsAndStage(bob, aliceChanPoint, 1, 1)
+ ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 1)
+
+ // We'll now mine a block which should confirm Bob's second layer tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // Now that the preimage from Bob has hit the chain, restart Alice to
+ // ensure she'll pick it up.
+ require.NoError(ht, restartAlice())
+
+ // If we then mine 1 additional block, Carol's second level tx should
+ // mature, and she can pull the funds from it with a sweep tx.
+ resp := ht.AssertNumPendingForceClose(carol, 1)[0]
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+ ht.Logf("Carol's timelock to_local output=%v, timelock on second stage "+
+ "htlc=%v", resp.BlocksTilMaturity,
+ resp.PendingHtlcs[0].BlocksTilMaturity)
+
+ ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity))
+
+ // Carol should have one a sweep request for her second level tx.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // Carol's sweep tx should be broadcast, assert it's in the mempool and
+ // mine it.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // We now mine blocks till the CSV lock on Bob's success HTLC on
+ // commitment Alice=>Bob expires.
+ resp = ht.AssertChannelPendingForceClose(bob, aliceChanPoint)
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+ htlcExpiry := resp.PendingHtlcs[0].BlocksTilMaturity
+
+ ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+
+ "htlc=%v", resp.BlocksTilMaturity, htlcExpiry)
+ ht.MineBlocks(int(htlcExpiry))
+
+ // When we mine one additional block, that will confirm Bob's second
+ // level HTLC sweep on channel Alice=>Bob.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // We now mine blocks till the CLTV lock on Bob's to_local output HTLC
+ // on commitment Bob=>Carol expires.
+ resp = ht.AssertChannelPendingForceClose(bob, bobChanPoint)
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+ htlcExpiry = resp.PendingHtlcs[0].BlocksTilMaturity
+
+ ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+
+ "htlc=%v", resp.BlocksTilMaturity, htlcExpiry)
+ ht.MineBlocks(int(resp.BlocksTilMaturity))
+
+ // Bob should have three requests in his sweeper.
+ // - to_local output from channel Bob=>Carol.
+ // - the anchor output from channel Alice=>Bob, uneconomical.
+ // - the anchor output from channel Bob=>Carol, uneconomical.
+ ht.AssertNumPendingSweeps(bob, 3)
+
+ // Alice should have two requests in her sweeper,
+ // - the anchor output from channel Alice=>Bob, uneconomical.
+ // - her commitment output, now mature.
+ ht.AssertNumPendingSweeps(alice, 2)
+
+ // Mine a block to confirm Bob's to_local output sweep.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ // All nodes should show zero pending and open channels.
+ for _, node := range []*node.HarnessNode{alice, bob, carol} {
+ ht.AssertNumPendingForceClose(node, 0)
+ ht.AssertNodeNumChannels(node, 0)
+ }
+
+ // Finally, check that the Alice's payment is correctly marked
+ // succeeded.
+ ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
+}
+
+// testLocalPreimageClaimAnchorZeroConf tests `runLocalPreimageClaim` with
+// zero-conf anchor channel.
+func testLocalPreimageClaimAnchorZeroConf(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // anchor channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: lnrpc.CommitmentType_ANCHORS,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and anchor.
+ cfg := node.CfgZeroConf
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runLocalPreimageClaim(ht, cfgs, params)
+}
+
+// testLocalPreimageClaimAnchor tests `runLocalPreimageClaim` with anchor
+// channel.
+func testLocalPreimageClaimAnchor(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using anchor
+ // channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{Amt: chanAmt}
+
+ cfg := node.CfgAnchor
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runLocalPreimageClaim(ht, cfgs, params)
+}
+
+// testLocalPreimageClaimSimpleTaprootZeroConf tests
+// `runLocalClaimIncomingHTLC` with zero-conf simple taproot channel.
+func testLocalPreimageClaimSimpleTaprootZeroConf(ht *lntest.HarnessTest) {
+ c := lnrpc.CommitmentType_SIMPLE_TAPROOT
+
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // simple taproot channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and leased channel.
+ cfg := node.CfgSimpleTaproot
+ cfg = append(cfg, node.CfgZeroConf...)
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runLocalPreimageClaim(ht, cfgs, params)
+}
+
+// testLocalPreimageClaimSimpleTaproot tests `runLocalClaimIncomingHTLC` with
+// simple taproot channel.
+func testLocalPreimageClaimSimpleTaproot(ht *lntest.HarnessTest) {
+ c := lnrpc.CommitmentType_SIMPLE_TAPROOT
+
+ // Create a three hop network: Alice -> Bob -> Carol, using simple
+ // taproot channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ cfg := node.CfgSimpleTaproot
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runLocalPreimageClaim(ht, cfgs, params)
+}
+
+// runLocalPreimageClaim tests that in the multi-hop HTLC scenario, if the
+// remote party goes to chain while we have an incoming HTLC, then when we
+// found out the preimage via the witness beacon, we properly settle the HTLC
+// directly on-chain using the preimage in order to ensure that we don't lose
+// any funds.
+func runLocalPreimageClaim(ht *lntest.HarnessTest,
+ cfgs [][]string, params lntest.OpenChannelParams) {
+
+ // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor
+ // is never swept.
+ //
+ // TODO(yy): delete this line once the normal anchor sweeping is
+ // removed.
+ ht.SetMinRelayFeerate(10_000)
+
+ // Create a three hop network: Alice -> Bob -> Carol.
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
+ aliceChanPoint := chanPoints[0]
+
+ // Fund Carol one UTXO so she can sweep outputs.
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
+
+ // Carol should have enough wallet UTXOs here to sweep the HTLC in the
+ // end of this test. However, due to a known issue, Carol's wallet may
+ // report there's no UTXO available. For details,
+ // - https://github.com/lightningnetwork/lnd/issues/8786
+ //
+ // TODO(yy): remove this step once the issue is resolved.
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
+
+ // If this is a taproot channel, then we'll need to make some manual
+ // route hints so Alice can actually find a route.
+ var routeHints []*lnrpc.RouteHint
+ if params.CommitmentType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
+ routeHints = makeRouteHints(bob, carol, params.ZeroConf)
+ }
+
+ // With the network active, we'll now add a new hodl invoice at Carol's
+ // end. Make sure the cltv expiry delta is large enough, otherwise Bob
+ // won't send out the outgoing htlc.
+ preimage := ht.RandomPreimage()
+ payHash := preimage.Hash()
+
+ invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
+ Value: invoiceAmt,
+ CltvExpiry: finalCltvDelta,
+ Hash: payHash[:],
+ RouteHints: routeHints,
+ }
+ carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
+
+ // Subscribe the invoice.
+ stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
+
+ // Now that we've created the invoice, we'll send a single payment from
+ // Alice to Carol. We won't wait for the response however, as Carol
+ // will not immediately settle the payment.
+ req := &routerrpc.SendPaymentRequest{
+ PaymentRequest: carolInvoice.PaymentRequest,
+ TimeoutSeconds: 60,
+ FeeLimitMsat: noFeeLimitMsat,
+ }
+ ht.SendPaymentAssertInflight(alice, req)
+
+ // At this point, all 3 nodes should now have an active channel with
+ // the created HTLCs pending on all of them.
+ //
+ // Alice should have one outgoing HTLC on channel Alice -> Bob.
+ ht.AssertNumActiveHtlcs(alice, 1)
+
+ // Bob should have one incoming HTLC on channel Alice -> Bob, and one
+ // outgoing HTLC on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(bob, 2)
+
+ // Carol should have one incoming HTLC on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(carol, 1)
+
+ // Wait for carol to mark invoice as accepted. There is a small gap to
+ // bridge between adding the htlc to the channel and executing the exit
+ // hop logic.
+ ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
+
+ // Record the height which the invoice will expire.
+ invoiceExpiry := ht.CurrentHeight() + uint32(invoiceReq.CltvExpiry)
+
+ // Next, Alice decides that she wants to exit the channel, so she'll
+ // immediately force close the channel by broadcast her commitment
+ // transaction.
+ closeStream, _ := ht.CloseChannelAssertPending(
+ alice, aliceChanPoint, true,
+ )
+ aliceForceClose := ht.AssertStreamChannelForceClosed(
+ alice, aliceChanPoint, true, closeStream,
+ )
+
+ // Wait for the channel to be marked pending force close.
+ ht.AssertChannelPendingForceClose(alice, aliceChanPoint)
+
+ // Once the force closing tx is mined, Alice should offer the anchor
+ // output to her sweeper.
+ ht.AssertNumPendingSweeps(alice, 1)
+
+ // Bob should offer his anchor output to his sweeper.
+ ht.AssertNumPendingSweeps(bob, 1)
+
+ // Mine enough blocks for Alice to sweep her funds from the force
+ // closed channel. AssertStreamChannelForceClosed() already mined a
+ // block, so mine one less than defaultCSV in order to perform mempool
+ // assertions.
+ ht.MineBlocks(defaultCSV - 1)
+
+ // Mine Alice's commit sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // Suspend bob, so Carol is forced to go on chain.
+ restartBob := ht.SuspendNode(bob)
+
+ // Settle invoice. This will just mark the invoice as settled, as there
+ // is no link anymore to remove the htlc from the commitment tx. For
+ // this test, it is important to actually settle and not leave the
+ // invoice in the accepted state, because without a known preimage, the
+ // channel arbitrator won't go to chain.
+ carol.RPC.SettleInvoice(preimage[:])
+
+ ht.Logf("Invoice expire height: %d, current: %d", invoiceExpiry,
+ ht.CurrentHeight())
+
+ // We'll now mine enough blocks so Carol decides that she needs to go
+ // on-chain to claim the HTLC as Bob has been inactive.
+ numBlocks := padCLTV(
+ invoiceExpiry - ht.CurrentHeight() - incomingBroadcastDelta,
+ )
+ ht.MineBlocks(int(numBlocks))
+
+ // Since Carol has time-sensitive HTLCs, she will use the anchor for
+ // CPFP purpose. Assert the anchor output is offered to the sweeper.
+ //
+ // For neutrino backend, Carol still have the two anchors - one from
+ // local commitment and the other from the remote.
+ if ht.IsNeutrinoBackend() {
+ ht.AssertNumPendingSweeps(carol, 2)
+ } else {
+ ht.AssertNumPendingSweeps(carol, 1)
+ }
+
+ // We should see two txns in the mempool, we now a block to confirm,
+ // - Carol's force close tx.
+ // - Carol's anchor sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ // Once the force close tx is confirmed, Carol should offer her
+ // incoming HTLC to her sweeper.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // Restart bob again.
+ require.NoError(ht, restartBob())
+
+ // Bob should have three sweeping requests,
+ // - the anchor output from channel Alice=>Bob, uneconomical.
+ // - the anchor output from channel Bob=>Carol, uneconomical.
+ // - the commit output sweep from the channel with Carol, no timelock.
+ ht.AssertNumPendingSweeps(bob, 3)
+
+ // Mine an empty block the for neutrino backend. We need this step to
+ // trigger Bob's chain watcher to detect the force close tx. Deep down,
+ // this happens because the notification system for neutrino is very
+ // different from others. Specifically, when a block contains the force
+ // close tx is notified, these two calls,
+ // - RegisterBlockEpochNtfn, will notify the block first.
+ // - RegisterSpendNtfn, will wait for the neutrino notifier to sync to
+ // the block, then perform a GetUtxo, which, by the time the spend
+ // details are sent, the blockbeat is considered processed in Bob's
+ // chain watcher.
+ //
+ // TODO(yy): refactor txNotifier to fix the above issue.
+ if ht.IsNeutrinoBackend() {
+ ht.MineEmptyBlocks(1)
+ }
+
+ // We mine one block to confirm,
+ // - Carol's sweeping tx of the incoming HTLC.
+ // - Bob's sweeping tx of his commit output.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ // When Bob notices Carol's second level tx in the block, he will
+ // extract the preimage and offer the HTLC to his sweeper. So he has,
+ // - the anchor output from channel Alice=>Bob, uneconomical.
+ // - the anchor output from channel Bob=>Carol, uneconomical.
+ // - the htlc sweeping tx.
+ ht.AssertNumPendingSweeps(bob, 3)
+
+ // Mine a block to trigger the sweep. This is needed because the
+ // preimage extraction logic from the link is not managed by the
+ // blockbeat, which means the preimage may be sent to the contest
+ // resolver after it's launched.
+ // Mine an empty block the for neutrino backend. We need this step to
+ // trigger Bob's chain watcher to detect the force close tx. Deep down,
+ // this happens because the notification system for neutrino is very
+ // different from others. Specifically, when a block contains the force
+ // close tx is notified, these two calls,
+ // - RegisterBlockEpochNtfn, will notify the block first.
+ // - RegisterSpendNtfn, will wait for the neutrino notifier to sync to
+ // the block, then perform a GetUtxo, which, by the time the spend
+ // details are sent, the blockbeat is considered processed in Bob's
+ // chain watcher.
+ //
+ // TODO(yy): refactor txNotifier to fix the above issue.
+ if ht.IsNeutrinoBackend() {
+ ht.MineEmptyBlocks(1)
+ }
+
+ //
+ // TODO(yy): Expose blockbeat to the link layer.
+ ht.MineEmptyBlocks(1)
+
+ // Bob should broadcast the sweeping of the direct preimage spent now.
+ bobHtlcSweep := ht.GetNumTxsFromMempool(1)[0]
+
+ // It should spend from the commitment in the channel with Alice.
+ ht.AssertTxSpendFrom(bobHtlcSweep, aliceForceClose)
+
+ // We'll now mine a block which should confirm Bob's HTLC sweep tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // Now that the sweeping tx has been confirmed, Bob should recognize
+ // that all contracts for the Bob-Carol channel have been fully
+ // resolved.
+ ht.AssertNumPendingForceClose(bob, 0)
+
+ // Mine blocks till Carol's second level tx matures.
+ resp := ht.AssertNumPendingForceClose(carol, 1)[0]
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+ ht.Logf("Carol's timelock to_local output=%v, timelock on second stage "+
+ "htlc=%v", resp.BlocksTilMaturity,
+ resp.PendingHtlcs[0].BlocksTilMaturity)
+
+ ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity))
+
+ // Carol should offer the htlc output to her sweeper.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // Mine a block to confirm Carol's sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // When Carol's sweep gets confirmed, she should have no more pending
+ // channels.
+ ht.AssertNumPendingForceClose(carol, 0)
+
+ // The invoice should show as settled for Carol, indicating that it was
+ // swept on-chain.
+ ht.AssertInvoiceState(stream, lnrpc.Invoice_SETTLED)
+
+ // Finally, check that the Alice's payment is correctly marked
+ // succeeded.
+ ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
+}
+
+// testLocalPreimageClaimLeasedZeroConf tests `runLocalPreimageClaim` with
+// zero-conf script enforced lease channel.
+func testLocalPreimageClaimLeasedZeroConf(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // anchor channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: leasedType,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and leased channel.
+ cfg := node.CfgLeased
+ cfg = append(cfg, node.CfgZeroConf...)
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runLocalPreimageClaimLeased(ht, cfgs, params)
+}
+
+// testLocalPreimageClaimLeased tests `runLocalPreimageClaim` with script
+// enforced lease channel.
+func testLocalPreimageClaimLeased(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using leased
+ // channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ CommitmentType: leasedType,
+ }
+
+ cfg := node.CfgLeased
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runLocalPreimageClaimLeased(ht, cfgs, params)
+}
+
+// runLocalPreimageClaimLeased tests that in the multi-hop HTLC scenario, if
+// the remote party goes to chain while we have an incoming HTLC, then when we
+// found out the preimage via the witness beacon, we properly settle the HTLC
+// directly on-chain using the preimage in order to ensure that we don't lose
+// any funds.
+func runLocalPreimageClaimLeased(ht *lntest.HarnessTest,
+ cfgs [][]string, params lntest.OpenChannelParams) {
+
+ // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor
+ // is never swept.
+ //
+ // TODO(yy): delete this line once the normal anchor sweeping is
+ // removed.
+ ht.SetMinRelayFeerate(10_000)
+
+ // Create a three hop network: Alice -> Bob -> Carol.
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
+ aliceChanPoint, bobChanPoint := chanPoints[0], chanPoints[1]
+
+ // Fund Carol one UTXO so she can sweep outputs.
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
+
+ // With the network active, we'll now add a new hodl invoice at Carol's
+ // end. Make sure the cltv expiry delta is large enough, otherwise Bob
+ // won't send out the outgoing htlc.
+ preimage := ht.RandomPreimage()
+ payHash := preimage.Hash()
+
+ invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
+ Value: invoiceAmt,
+ CltvExpiry: finalCltvDelta,
+ Hash: payHash[:],
+ }
+ carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
+
+ // Subscribe the invoice.
+ stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
+
+ // Now that we've created the invoice, we'll send a single payment from
+ // Alice to Carol. We won't wait for the response however, as Carol
+ // will not immediately settle the payment.
+ req := &routerrpc.SendPaymentRequest{
+ PaymentRequest: carolInvoice.PaymentRequest,
+ TimeoutSeconds: 60,
+ FeeLimitMsat: noFeeLimitMsat,
+ }
+ ht.SendPaymentAssertInflight(alice, req)
+
+ // At this point, all 3 nodes should now have an active channel with
+ // the created HTLCs pending on all of them.
+ //
+ // Alice should have one outgoing HTLC on channel Alice -> Bob.
+ ht.AssertNumActiveHtlcs(alice, 1)
+
+ // Bob should have one incoming HTLC on channel Alice -> Bob, and one
+ // outgoing HTLC on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(bob, 2)
+
+ // Carol should have one incoming HTLC on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(carol, 1)
+
+ // Wait for carol to mark invoice as accepted. There is a small gap to
+ // bridge between adding the htlc to the channel and executing the exit
+ // hop logic.
+ ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
+
+ // Record the height which the invoice will expire.
+ invoiceExpiry := ht.CurrentHeight() + uint32(invoiceReq.CltvExpiry)
+
+ // Next, Alice decides that she wants to exit the channel, so she'll
+ // immediately force close the channel by broadcast her commitment
+ // transaction.
+ closeStream, _ := ht.CloseChannelAssertPending(
+ alice, aliceChanPoint, true,
+ )
+ aliceForceClose := ht.AssertStreamChannelForceClosed(
+ alice, aliceChanPoint, true, closeStream,
+ )
+
+ // Wait for the channel to be marked pending force close.
+ ht.AssertChannelPendingForceClose(alice, aliceChanPoint)
+
+ // Once the force closing tx is mined, Alice should offer the anchor
+ // output to her sweeper.
+ ht.AssertNumPendingSweeps(alice, 1)
+
+ // Bob should offer his anchor output to his sweeper.
+ ht.AssertNumPendingSweeps(bob, 1)
+
+ // Suspend bob, so Carol is forced to go on chain.
+ restartBob := ht.SuspendNode(bob)
+
+ // Settle invoice. This will just mark the invoice as settled, as there
+ // is no link anymore to remove the htlc from the commitment tx. For
+ // this test, it is important to actually settle and not leave the
+ // invoice in the accepted state, because without a known preimage, the
+ // channel arbitrator won't go to chain.
+ carol.RPC.SettleInvoice(preimage[:])
+
+ ht.Logf("Invoice expire height: %d, current: %d", invoiceExpiry,
+ ht.CurrentHeight())
+
+ // We'll now mine enough blocks so Carol decides that she needs to go
+ // on-chain to claim the HTLC as Bob has been inactive.
+ numBlocks := padCLTV(
+ invoiceExpiry - ht.CurrentHeight() - incomingBroadcastDelta - 1,
+ )
+ ht.MineBlocks(int(numBlocks))
+
+ // Since Carol has time-sensitive HTLCs, she will use the anchor for
+ // CPFP purpose. Assert the anchor output is offered to the sweeper.
+ //
+ // For neutrino backend, there's no way to know the sweeping of the
+ // remote anchor is failed, so Carol still sees two pending sweeps.
+ if ht.IsNeutrinoBackend() {
+ ht.AssertNumPendingSweeps(carol, 2)
+ } else {
+ ht.AssertNumPendingSweeps(carol, 1)
+ }
+
+ // We should see two txns in the mempool, we now a block to confirm,
+ // - Carol's force close tx.
+ // - Carol's anchor sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ // Once the force close tx is confirmed, Carol should offer her
+ // incoming HTLC to her sweeper.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // Restart bob again.
+ require.NoError(ht, restartBob())
+
+ // Bob should have two sweeping requests,
+ // - the anchor output from channel Alice=>Bob, uneconomical.
+ // - the anchor output from channel Bob=>Carol, uneconomical.
+ // - the commit output sweep from the channel with Carol, which is CLTV
+ // locked so it won't show up the pending sweeps.
+ ht.AssertNumPendingSweeps(bob, 2)
+
+ // We mine one block to confirm,
+ // - Carol's sweeping tx of the incoming HTLC.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // When Bob notices Carol's second level tx in the block, he will
+ // extract the preimage and offer the HTLC to his sweeper. So he has,
+ // - the anchor output from channel Alice=>Bob, uneconomical.
+ // - the anchor output from channel Bob=>Carol, uneconomical.
+ // - the htlc sweeping tx.
+ ht.AssertNumPendingSweeps(bob, 3)
+
+ // Mine a block to trigger the sweep. This is needed because the
+ // preimage extraction logic from the link is not managed by the
+ // blockbeat, which means the preimage may be sent to the contest
+ // resolver after it's launched.
+ //
+ // TODO(yy): Expose blockbeat to the link layer.
+ ht.MineEmptyBlocks(1)
+
+ // Bob should broadcast the sweeping of the direct preimage spent now.
+ bobHtlcSweep := ht.GetNumTxsFromMempool(1)[0]
+
+ // It should spend from the commitment in the channel with Alice.
+ ht.AssertTxSpendFrom(bobHtlcSweep, aliceForceClose)
+
+ // We'll now mine a block which should confirm Bob's HTLC sweep tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // Now that the sweeping tx has been confirmed, Bob should recognize
+ // that all contracts for the Bob-Carol channel have been fully
+ // resolved.
+ ht.AssertNumPendingForceClose(bob, 1)
+ ht.AssertChannelPendingForceClose(bob, bobChanPoint)
+
+ // Mine blocks till Carol's second level tx matures.
+ resp := ht.AssertNumPendingForceClose(carol, 1)[0]
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+ ht.Logf("Carol's timelock to_local output=%v, timelock on second stage "+
+ "htlc=%v", resp.BlocksTilMaturity,
+ resp.PendingHtlcs[0].BlocksTilMaturity)
+
+ ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity))
+
+ // Carol should offer the htlc output to her sweeper.
+ ht.AssertNumPendingSweeps(carol, 1)
+
+ // Mine a block to confirm Carol's sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
+ // When Carol's sweep gets confirmed, she should have no more pending
+ // channels.
+ ht.AssertNumPendingForceClose(carol, 0)
+
+ // The invoice should show as settled for Carol, indicating that it was
+ // swept on-chain.
+ ht.AssertInvoiceState(stream, lnrpc.Invoice_SETTLED)
+
+ // Check that the Alice's payment is correctly marked succeeded.
+ ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
+
+ // With the script-enforced lease commitment type, Alice and Bob still
+ // haven't been able to sweep their respective commit outputs due to
+ // the additional CLTV. We'll need to mine enough blocks for the
+ // timelock to expire and prompt their sweep.
+ //
+ // Get num of blocks to mine.
+ resp = ht.AssertNumPendingForceClose(alice, 1)[0]
+ require.Equal(ht, 1, len(resp.PendingHtlcs))
+
+ ht.Logf("Alice's timelock to_local output=%v, timelock on second "+
+ "stage htlc=%v", resp.BlocksTilMaturity,
+ resp.PendingHtlcs[0].BlocksTilMaturity)
+
+ ht.MineBlocks(int(resp.BlocksTilMaturity))
+
+ // Alice should two sweeping requests,
+ // - the anchor output from channel Alice=>Bob, uneconomical.
+ // - the commit output sweep from the channel with Bob.
+ ht.AssertNumPendingSweeps(alice, 2)
+
+ // Bob should have three sweeping requests,
+ // - the anchor output from channel Alice=>Bob, uneconomical.
+ // - the anchor output from channel Bob=>Carol, uneconomical.
+ // - the commit output sweep from the channel with Carol.
+ ht.AssertNumPendingSweeps(bob, 3)
+
+ // Confirm their sweeps.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ // Both nodes should consider the channel fully closed.
+ ht.AssertNumPendingForceClose(alice, 0)
+ ht.AssertNumPendingForceClose(bob, 0)
+}
+
+// testHtlcAggregaitonAnchor tests `runHtlcAggregation` with zero-conf anchor
+// channel.
+func testHtlcAggregaitonAnchorZeroConf(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // anchor channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: lnrpc.CommitmentType_ANCHORS,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and anchor.
+ cfg := node.CfgZeroConf
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runHtlcAggregation(ht, cfgs, params)
+}
+
+// testHtlcAggregaitonAnchor tests `runHtlcAggregation` with anchor channel.
+func testHtlcAggregaitonAnchor(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using anchor
+ // channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{Amt: chanAmt}
+
+ cfg := node.CfgAnchor
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runHtlcAggregation(ht, cfgs, params)
+}
+
+// testHtlcAggregaitonSimpleTaprootZeroConf tests `runHtlcAggregation` with
+// zero-conf simple taproot channel.
+func testHtlcAggregaitonSimpleTaprootZeroConf(ht *lntest.HarnessTest) {
+ c := lnrpc.CommitmentType_SIMPLE_TAPROOT
+
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // simple taproot channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and leased channel.
+ cfg := node.CfgSimpleTaproot
+ cfg = append(cfg, node.CfgZeroConf...)
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runHtlcAggregation(ht, cfgs, params)
+}
+
+// testHtlcAggregaitonSimpleTaproot tests `runHtlcAggregation` with simple
+// taproot channel.
+func testHtlcAggregaitonSimpleTaproot(ht *lntest.HarnessTest) {
+ c := lnrpc.CommitmentType_SIMPLE_TAPROOT
+
+ // Create a three hop network: Alice -> Bob -> Carol, using simple
+ // taproot channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ CommitmentType: c,
+ Private: true,
+ }
+
+ cfg := node.CfgSimpleTaproot
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runHtlcAggregation(ht, cfgs, params)
+}
+
+// testHtlcAggregaitonLeasedZeroConf tests `runHtlcAggregation` with zero-conf
+// script enforced lease channel.
+func testHtlcAggregaitonLeasedZeroConf(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using zero-conf
+ // anchor channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ ZeroConf: true,
+ CommitmentType: leasedType,
+ }
+
+ // Prepare Carol's node config to enable zero-conf and leased channel.
+ cfg := node.CfgLeased
+ cfg = append(cfg, node.CfgZeroConf...)
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runHtlcAggregation(ht, cfgs, params)
+}
+
+// testHtlcAggregaitonLeased tests `runHtlcAggregation` with script enforced
+// lease channel.
+func testHtlcAggregaitonLeased(ht *lntest.HarnessTest) {
+ // Create a three hop network: Alice -> Bob -> Carol, using leased
+ // channels.
+ //
+ // Prepare params.
+ params := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ CommitmentType: leasedType,
+ }
+
+ cfg := node.CfgLeased
+ cfgs := [][]string{cfg, cfg, cfg}
+
+ runHtlcAggregation(ht, cfgs, params)
+}
+
+// runHtlcAggregation tests that in a multi-hop HTLC scenario, if we force
+// close a channel with both incoming and outgoing HTLCs, we can properly
+// resolve them using the second level timeout and success transactions. In
+// case of anchor channels, the second-level spends can also be aggregated and
+// properly feebumped, so we'll check that as well.
+func runHtlcAggregation(ht *lntest.HarnessTest,
+ cfgs [][]string, params lntest.OpenChannelParams) {
+
+ // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor
+ // is never swept.
+ //
+ // TODO(yy): delete this line once the normal anchor sweeping is
+ // removed.
+ ht.SetMinRelayFeerate(10_000)
+
+ // Create a three hop network: Alice -> Bob -> Carol.
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
+ _, bobChanPoint := chanPoints[0], chanPoints[1]
+
+ // We need one additional UTXO to create the sweeping tx for the
+ // second-level success txes.
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, bob)
+
+ // Bob should have enough wallet UTXOs here to sweep the HTLC in the
+ // end of this test. However, due to a known issue, Bob's wallet may
+ // report there's no UTXO available. For details,
+ // - https://github.com/lightningnetwork/lnd/issues/8786
+ //
+ // TODO(yy): remove this step once the issue is resolved.
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, bob)
+
+ // If this is a taproot channel, then we'll need to make some manual
+ // route hints so Alice+Carol can actually find a route.
+ var (
+ carolRouteHints []*lnrpc.RouteHint
+ aliceRouteHints []*lnrpc.RouteHint
+ )
+
+ if params.CommitmentType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
+ carolRouteHints = makeRouteHints(bob, carol, params.ZeroConf)
+ aliceRouteHints = makeRouteHints(bob, alice, params.ZeroConf)
+ }
+
+ // To ensure we have capacity in both directions of the route, we'll
+ // make a fairly large payment Alice->Carol and settle it.
+ const reBalanceAmt = 500_000
+ invoice := &lnrpc.Invoice{
+ Value: reBalanceAmt,
+ RouteHints: carolRouteHints,
+ }
+ invResp := carol.RPC.AddInvoice(invoice)
+ ht.CompletePaymentRequests(alice, []string{invResp.PaymentRequest})
+
+ // Make sure Carol has settled the invoice.
+ ht.AssertInvoiceSettled(carol, invResp.PaymentAddr)
+
+ // With the network active, we'll now add a new hodl invoices at both
+ // Alice's and Carol's end. Make sure the cltv expiry delta is large
+ // enough, otherwise Bob won't send out the outgoing htlc.
+ const numInvoices = 5
+ const invoiceAmt = 50_000
+
+ var (
+ carolInvoices []*invoicesrpc.AddHoldInvoiceResp
+ aliceInvoices []*invoicesrpc.AddHoldInvoiceResp
+ alicePreimages []lntypes.Preimage
+ payHashes [][]byte
+ invoiceStreamsCarol []rpc.SingleInvoiceClient
+ invoiceStreamsAlice []rpc.SingleInvoiceClient
+ )
+
+ // Add Carol invoices.
+ for i := 0; i < numInvoices; i++ {
+ preimage := ht.RandomPreimage()
+ payHash := preimage.Hash()
+
+ invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
+ Value: invoiceAmt,
+ CltvExpiry: finalCltvDelta,
+ Hash: payHash[:],
+ RouteHints: carolRouteHints,
+ }
+ carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
+
+ carolInvoices = append(carolInvoices, carolInvoice)
+ payHashes = append(payHashes, payHash[:])
+
+ // Subscribe the invoice.
+ stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
+ invoiceStreamsCarol = append(invoiceStreamsCarol, stream)
+ }
+
+ // We'll give Alice's invoices a longer CLTV expiry, to ensure the
+ // channel Bob<->Carol will be closed first.
+ for i := 0; i < numInvoices; i++ {
+ preimage := ht.RandomPreimage()
+ payHash := preimage.Hash()
+
+ invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
+ Value: invoiceAmt,
+ CltvExpiry: thawHeightDelta - 4,
+ Hash: payHash[:],
+ RouteHints: aliceRouteHints,
+ }
+ aliceInvoice := alice.RPC.AddHoldInvoice(invoiceReq)
+
+ aliceInvoices = append(aliceInvoices, aliceInvoice)
+ alicePreimages = append(alicePreimages, preimage)
+ payHashes = append(payHashes, payHash[:])
+
+ // Subscribe the invoice.
+ stream := alice.RPC.SubscribeSingleInvoice(payHash[:])
+ invoiceStreamsAlice = append(invoiceStreamsAlice, stream)
+ }
+
+ // Now that we've created the invoices, we'll pay them all from
+ // Alice<->Carol, going through Bob. We won't wait for the response
+ // however, as neither will immediately settle the payment.
+ //
+ // Alice will pay all of Carol's invoices.
+ for _, carolInvoice := range carolInvoices {
+ req := &routerrpc.SendPaymentRequest{
+ PaymentRequest: carolInvoice.PaymentRequest,
+ TimeoutSeconds: 60,
+ FeeLimitMsat: noFeeLimitMsat,
+ }
+ ht.SendPaymentAssertInflight(alice, req)
+ }
+
+ // And Carol will pay Alice's.
+ for _, aliceInvoice := range aliceInvoices {
+ req := &routerrpc.SendPaymentRequest{
+ PaymentRequest: aliceInvoice.PaymentRequest,
+ TimeoutSeconds: 60,
+ FeeLimitMsat: noFeeLimitMsat,
+ }
+ ht.SendPaymentAssertInflight(carol, req)
+ }
+
+ // At this point, all 3 nodes should now have an active channel with
+ // the created HTLCs pending on all of them.
+ //
+ // Alice sent numInvoices and received numInvoices payments, she should
+ // have numInvoices*2 HTLCs.
+ ht.AssertNumActiveHtlcs(alice, numInvoices*2)
+
+ // Bob should have 2*numInvoices HTLCs on channel Alice -> Bob, and
+ // numInvoices*2 HTLCs on channel Bob -> Carol.
+ ht.AssertNumActiveHtlcs(bob, numInvoices*4)
+
+ // Carol sent numInvoices and received numInvoices payments, she should
+ // have numInvoices*2 HTLCs.
+ ht.AssertNumActiveHtlcs(carol, numInvoices*2)
+
+ // Wait for Alice and Carol to mark the invoices as accepted. There is
+ // a small gap to bridge between adding the htlc to the channel and
+ // executing the exit hop logic.
+ for _, stream := range invoiceStreamsCarol {
+ ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
+ }
+
+ for _, stream := range invoiceStreamsAlice {
+ ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
+ }
+
+ // We want Carol's htlcs to expire off-chain to demonstrate bob's force
+ // close. However, Carol will cancel her invoices to prevent force
+ // closes, so we shut her down for now.
+ restartCarol := ht.SuspendNode(carol)
+
+ // We'll now mine enough blocks to trigger Bob's broadcast of his
+ // commitment transaction due to the fact that the Carol's HTLCs are
+ // about to timeout. With the default outgoing broadcast delta of zero,
+ // this will be the same height as the htlc expiry height.
+ numBlocks := padCLTV(
+ uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
+ )
+ ht.MineBlocks(int(numBlocks))
+
+ // Bob should have one anchor sweep request.
+ //
+ // For neutrino backend, there's no way to know the sweeping of the
+ // remote anchor is failed, so Bob still sees two pending sweeps.
+ if ht.IsNeutrinoBackend() {
+ ht.AssertNumPendingSweeps(bob, 2)
+ } else {
+ ht.AssertNumPendingSweeps(bob, 1)
+ }
+
+ // Bob's force close tx and anchor sweeping tx should now be found in
+ // the mempool.
+ ht.AssertNumTxsInMempool(2)
+
+ // Once bob has force closed, we can restart carol.
+ require.NoError(ht, restartCarol())
+
+ // Mine a block to confirm Bob's force close tx and anchor sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 2)
+
+ // Let Alice settle her invoices. When Bob now gets the preimages, he
+ // will broadcast his second-level txns to claim the htlcs.
+ for _, preimage := range alicePreimages {
+ alice.RPC.SettleInvoice(preimage[:])
+ }
+
+ // Bob should have `numInvoices` for both HTLC success and timeout
+ // txns.
+ ht.AssertNumPendingSweeps(bob, numInvoices*2)
+
+ // Mine a block to trigger the sweep. This is needed because the
+ // preimage extraction logic from the link is not managed by the
+ // blockbeat, which means the preimage may be sent to the contest
+ // resolver after it's launched.
+ //
+ // TODO(yy): Expose blockbeat to the link layer.
+ ht.MineEmptyBlocks(1)
+
+ // Carol should have commit and anchor outputs.
+ ht.AssertNumPendingSweeps(carol, 2)
+
+ // We expect to see three sweeping txns:
+ // 1. Bob's sweeping tx for all timeout HTLCs.
+ // 2. Bob's sweeping tx for all success HTLCs.
+ // 3. Carol's sweeping tx for her commit output.
+ // Mine a block to confirm them.
+ ht.MineBlocksAndAssertNumTxes(1, 3)
+
+ // For this channel, we also check the number of HTLCs and the stage
+ // are correct.
+ ht.AssertNumHTLCsAndStage(bob, bobChanPoint, numInvoices*2, 2)
+
+ // For non-leased channels, we can now mine one block so Bob will sweep
+ // his to_local output.
+ if params.CommitmentType != leasedType {
+ // Mine one block so Bob's to_local becomes mature.
+ ht.MineBlocks(1)
+
+ // Bob should offer the to_local output to his sweeper now.
+ ht.AssertNumPendingSweeps(bob, 1)
+
+ // Mine a block to confirm Bob's sweeping of his to_local
+ // output.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+ }
+
+ // Mine blocks till the CSV expires on Bob's HTLC output.
+ resp := ht.AssertNumPendingForceClose(bob, 1)[0]
+ require.Equal(ht, numInvoices*2, len(resp.PendingHtlcs))
+
+ ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+
+ "htlc=%v", resp.BlocksTilMaturity,
+ resp.PendingHtlcs[0].BlocksTilMaturity)
+
+ ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity))
+
+ // With the above mined block, Bob's HTLCs should now all be offered to
+ // his sweeper since the CSV lock is now expired.
+ //
+ // For leased channel, due to the test setup, Bob's to_local output is
+ // now also mature and can be swept together with his HTLCs.
+ if params.CommitmentType == leasedType {
+ ht.AssertNumPendingSweeps(bob, numInvoices*2+1)
+ } else {
+ ht.AssertNumPendingSweeps(bob, numInvoices*2)
+ }
+
+ // When we mine one additional block, that will confirm Bob's second
+ // level sweep. Now Bob should have no pending channels anymore, as
+ // this just resolved it by the confirmation of the sweep tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+ ht.AssertNumPendingForceClose(bob, 0)
+
+ // Carol should have no channels left.
+ ht.AssertNumPendingForceClose(carol, 0)
+}
diff --git a/itest/lnd_multi-hop_test.go b/itest/lnd_multi-hop_test.go
deleted file mode 100644
index f3a906c141..0000000000
--- a/itest/lnd_multi-hop_test.go
+++ /dev/null
@@ -1,2697 +0,0 @@
-package itest
-
-import (
- "context"
- "fmt"
- "testing"
-
- "github.com/btcsuite/btcd/btcutil"
- "github.com/btcsuite/btcd/chaincfg/chainhash"
- "github.com/btcsuite/btcd/wire"
- "github.com/lightningnetwork/lnd/chainreg"
- "github.com/lightningnetwork/lnd/lncfg"
- "github.com/lightningnetwork/lnd/lnrpc"
- "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
- "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
- "github.com/lightningnetwork/lnd/lntest"
- "github.com/lightningnetwork/lnd/lntest/node"
- "github.com/lightningnetwork/lnd/lntest/rpc"
- "github.com/lightningnetwork/lnd/lntest/wait"
- "github.com/lightningnetwork/lnd/lntypes"
- "github.com/lightningnetwork/lnd/lnwallet/chainfee"
- "github.com/lightningnetwork/lnd/routing"
- "github.com/stretchr/testify/require"
-)
-
-const (
- finalCltvDelta = routing.MinCLTVDelta // 18.
- thawHeightDelta = finalCltvDelta * 2 // 36.
-)
-
-var commitWithZeroConf = []struct {
- commitType lnrpc.CommitmentType
- zeroConf bool
-}{
- {
- commitType: lnrpc.CommitmentType_ANCHORS,
- zeroConf: false,
- },
- {
- commitType: lnrpc.CommitmentType_ANCHORS,
- zeroConf: true,
- },
- {
- commitType: lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE,
- zeroConf: false,
- },
- {
- commitType: lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE,
- zeroConf: true,
- },
- {
- commitType: lnrpc.CommitmentType_SIMPLE_TAPROOT,
- zeroConf: false,
- },
- {
- commitType: lnrpc.CommitmentType_SIMPLE_TAPROOT,
- zeroConf: true,
- },
-}
-
-// makeRouteHints creates a route hints that will allow Carol to be reached
-// using an unadvertised channel created by Bob (Bob -> Carol). If the zeroConf
-// bool is set, then the scid alias of Bob will be used in place.
-func makeRouteHints(bob, carol *node.HarnessNode,
- zeroConf bool) []*lnrpc.RouteHint {
-
- carolChans := carol.RPC.ListChannels(
- &lnrpc.ListChannelsRequest{},
- )
-
- carolChan := carolChans.Channels[0]
-
- hopHint := &lnrpc.HopHint{
- NodeId: carolChan.RemotePubkey,
- ChanId: carolChan.ChanId,
- FeeBaseMsat: uint32(
- chainreg.DefaultBitcoinBaseFeeMSat,
- ),
- FeeProportionalMillionths: uint32(
- chainreg.DefaultBitcoinFeeRate,
- ),
- CltvExpiryDelta: chainreg.DefaultBitcoinTimeLockDelta,
- }
-
- if zeroConf {
- bobChans := bob.RPC.ListChannels(
- &lnrpc.ListChannelsRequest{},
- )
-
- // Now that we have Bob's channels, scan for the channel he has
- // open to Carol so we can use the proper scid.
- var found bool
- for _, bobChan := range bobChans.Channels {
- if bobChan.RemotePubkey == carol.PubKeyStr {
- hopHint.ChanId = bobChan.AliasScids[0]
-
- found = true
-
- break
- }
- }
- if !found {
- bob.Fatalf("unable to create route hint")
- }
- }
-
- return []*lnrpc.RouteHint{
- {
- HopHints: []*lnrpc.HopHint{hopHint},
- },
- }
-}
-
-// caseRunner defines a single test case runner.
-type caseRunner func(ht *lntest.HarnessTest, alice, bob *node.HarnessNode,
- c lnrpc.CommitmentType, zeroConf bool)
-
-// runMultiHopHtlcClaimTest is a helper method to build test cases based on
-// different commitment types and zero-conf config and run them.
-//
-// TODO(yy): flatten this test.
-func runMultiHopHtlcClaimTest(ht *lntest.HarnessTest, tester caseRunner) {
- for _, typeAndConf := range commitWithZeroConf {
- typeAndConf := typeAndConf
- name := fmt.Sprintf("zeroconf=%v/committype=%v",
- typeAndConf.zeroConf, typeAndConf.commitType.String())
-
- // Create the nodes here so that separate logs will be created
- // for Alice and Bob.
- args := lntest.NodeArgsForCommitType(typeAndConf.commitType)
- if typeAndConf.zeroConf {
- args = append(
- args, "--protocol.option-scid-alias",
- "--protocol.zero-conf",
- )
- }
-
- s := ht.Run(name, func(t1 *testing.T) {
- st := ht.Subtest(t1)
-
- alice := st.NewNode("Alice", args)
- bob := st.NewNode("Bob", args)
- st.ConnectNodes(alice, bob)
-
- // Start each test with the default static fee estimate.
- st.SetFeeEstimate(12500)
-
- // Add test name to the logs.
- alice.AddToLogf("Running test case: %s", name)
- bob.AddToLogf("Running test case: %s", name)
-
- tester(
- st, alice, bob,
- typeAndConf.commitType, typeAndConf.zeroConf,
- )
- })
- if !s {
- return
- }
- }
-}
-
-// testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the
-// outgoing HTLC is about to time out, then we'll go to chain in order to claim
-// it using the HTLC timeout transaction. Any dust HTLC's should be immediately
-// canceled backwards. Once the timeout has been reached, then we should sweep
-// it on-chain, and cancel the HTLC backwards.
-func testMultiHopHtlcLocalTimeout(ht *lntest.HarnessTest) {
- runMultiHopHtlcClaimTest(ht, runMultiHopHtlcLocalTimeout)
-}
-
-func runMultiHopHtlcLocalTimeout(ht *lntest.HarnessTest,
- alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) {
-
- // First, we'll create a three hop network: Alice -> Bob -> Carol, with
- // Carol refusing to actually settle or directly cancel any HTLC's
- // self.
- aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
- ht, alice, bob, true, c, zeroConf,
- )
-
- // For neutrino backend, we need to fund one more UTXO for Bob so he
- // can sweep his outputs.
- if ht.IsNeutrinoBackend() {
- ht.FundCoins(btcutil.SatoshiPerBitcoin, bob)
- }
-
- // Now that our channels are set up, we'll send two HTLC's from Alice
- // to Carol. The first HTLC will be universally considered "dust",
- // while the second will be a proper fully valued HTLC.
- const (
- dustHtlcAmt = btcutil.Amount(100)
- htlcAmt = btcutil.Amount(300_000)
- )
-
- // We'll create two random payment hashes unknown to carol, then send
- // each of them by manually specifying the HTLC details.
- carolPubKey := carol.PubKey[:]
- dustPayHash := ht.Random32Bytes()
- payHash := ht.Random32Bytes()
-
- // If this is a taproot channel, then we'll need to make some manual
- // route hints so Alice can actually find a route.
- var routeHints []*lnrpc.RouteHint
- if c == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- routeHints = makeRouteHints(bob, carol, zeroConf)
- }
-
- alice.RPC.SendPayment(&routerrpc.SendPaymentRequest{
- Dest: carolPubKey,
- Amt: int64(dustHtlcAmt),
- PaymentHash: dustPayHash,
- FinalCltvDelta: finalCltvDelta,
- TimeoutSeconds: 60,
- FeeLimitMsat: noFeeLimitMsat,
- RouteHints: routeHints,
- })
-
- alice.RPC.SendPayment(&routerrpc.SendPaymentRequest{
- Dest: carolPubKey,
- Amt: int64(htlcAmt),
- PaymentHash: payHash,
- FinalCltvDelta: finalCltvDelta,
- TimeoutSeconds: 60,
- FeeLimitMsat: noFeeLimitMsat,
- RouteHints: routeHints,
- })
-
- // Verify that all nodes in the path now have two HTLC's with the
- // proper parameters.
- ht.AssertActiveHtlcs(alice, dustPayHash, payHash)
- ht.AssertActiveHtlcs(bob, dustPayHash, payHash)
- ht.AssertActiveHtlcs(carol, dustPayHash, payHash)
-
- // Increase the fee estimate so that the following force close tx will
- // be cpfp'ed.
- ht.SetFeeEstimate(30000)
-
- // We'll now mine enough blocks to trigger Bob's broadcast of his
- // commitment transaction due to the fact that the HTLC is about to
- // timeout. With the default outgoing broadcast delta of zero, this will
- // be the same height as the htlc expiry height.
- numBlocks := padCLTV(
- uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
- )
- ht.MineBlocks(int(numBlocks))
-
- // Bob's force close transaction should now be found in the mempool.
- ht.AssertNumTxsInMempool(1)
- op := ht.OutPointFromChannelPoint(bobChanPoint)
- closeTx := ht.AssertOutpointInMempool(op)
-
- // Dust HTLCs are immediately canceled backwards as soon as the local
- // commitment tx is successfully broadcasted to the local mempool.
- ht.AssertActiveHtlcs(alice, payHash)
-
- // Bob's anchor output should be offered to his sweep since Bob has
- // time-sensitive HTLCs - we expect both anchors are offered.
- ht.AssertNumPendingSweeps(bob, 2)
-
- // Mine a block to confirm the closing transaction.
- ht.MineBlocksAndAssertNumTxes(1, 1)
-
- // With the closing transaction confirmed, we should expect Bob's HTLC
- // timeout transaction to be offered to the sweeper due to the expiry
- // being reached. we also expect Bon and Carol's anchor sweeps.
- ht.AssertNumPendingSweeps(bob, 2)
- ht.AssertNumPendingSweeps(carol, 1)
-
- // Mine a block to trigger Bob's sweeper to sweep.
- ht.MineEmptyBlocks(1)
-
- // The above mined block would trigger Bob and Carol's sweepers to take
- // action. We now expect two txns:
- // 1. Bob's sweeping tx anchor sweep should now be found in the mempool.
- // 2. Bob's HTLC timeout tx sweep should now be found in the mempool.
- // Carol's anchor sweep should be failed due to output being dust.
- ht.AssertNumTxsInMempool(2)
-
- htlcOutpoint := wire.OutPoint{Hash: closeTx.TxHash(), Index: 2}
- commitOutpoint := wire.OutPoint{Hash: closeTx.TxHash(), Index: 3}
- htlcTimeoutTxid := ht.AssertOutpointInMempool(
- htlcOutpoint,
- ).TxHash()
-
- // Mine a block to confirm the above two sweeping txns.
- ht.MineBlocksAndAssertNumTxes(1, 2)
-
- // With Bob's HTLC timeout transaction confirmed, there should be no
- // active HTLC's on the commitment transaction from Alice -> Bob.
- ht.AssertNumActiveHtlcs(alice, 0)
-
- // At this point, Bob should show that the pending HTLC has advanced to
- // the second stage and is ready to be swept once the timelock is up.
- pendingChanResp := bob.RPC.PendingChannels()
- require.Equal(ht, 1, len(pendingChanResp.PendingForceClosingChannels))
- forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
- require.NotZero(ht, forceCloseChan.LimboBalance)
- require.Positive(ht, forceCloseChan.BlocksTilMaturity)
- require.Equal(ht, 1, len(forceCloseChan.PendingHtlcs))
- require.Equal(ht, uint32(2), forceCloseChan.PendingHtlcs[0].Stage)
-
- ht.Logf("Bob's timelock on commit=%v, timelock on htlc=%v",
- forceCloseChan.BlocksTilMaturity,
- forceCloseChan.PendingHtlcs[0].BlocksTilMaturity)
-
- htlcTimeoutOutpoint := wire.OutPoint{Hash: htlcTimeoutTxid, Index: 0}
- if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- // Since Bob is the initiator of the script-enforced leased
- // channel between him and Carol, he will incur an additional
- // CLTV on top of the usual CSV delay on any outputs that he
- // can sweep back to his wallet.
- blocksTilMaturity := int(forceCloseChan.BlocksTilMaturity)
-
- // We now mine enough blocks to trigger the sweep of the HTLC
- // timeout tx.
- ht.MineEmptyBlocks(blocksTilMaturity - 1)
-
- // Check that Bob has one pending sweeping tx - the HTLC
- // timeout tx.
- ht.AssertNumPendingSweeps(bob, 1)
-
- // Mine one more blocks, then his commit output will mature.
- // This will also trigger the sweeper to sweep his HTLC timeout
- // tx.
- ht.MineEmptyBlocks(1)
-
- // Check that Bob has two pending sweeping txns.
- ht.AssertNumPendingSweeps(bob, 2)
-
- // Assert that the HTLC timeout tx is now in the mempool.
- ht.AssertOutpointInMempool(htlcTimeoutOutpoint)
-
- // We now wait for 30 seconds to overcome the flake - there's a
- // block race between contractcourt and sweeper, causing the
- // sweep to be broadcast earlier.
- //
- // TODO(yy): remove this once `blockbeat` is in place.
- numExpected := 1
- err := wait.NoError(func() error {
- mem := ht.GetRawMempool()
- if len(mem) == 2 {
- numExpected = 2
- return nil
- }
-
- return fmt.Errorf("want %d, got %v in mempool: %v",
- numExpected, len(mem), mem)
- }, wait.DefaultTimeout)
- ht.Logf("Checking mempool got: %v", err)
-
- // Mine a block to trigger the sweep of his commit output and
- // confirm his HTLC timeout sweep.
- ht.MineBlocksAndAssertNumTxes(1, numExpected)
-
- // For leased channels, we need to mine one more block to
- // confirm Bob's commit output sweep.
- //
- // NOTE: we mine this block conditionally, as the commit output
- // may have already been swept one block earlier due to the
- // race in block consumption among subsystems.
- pendingChanResp := bob.RPC.PendingChannels()
- if len(pendingChanResp.PendingForceClosingChannels) != 0 {
- // Check that the sweep spends the expected inputs.
- ht.AssertOutpointInMempool(commitOutpoint)
- ht.MineBlocksAndAssertNumTxes(1, 1)
- }
- } else {
- // Since Bob force closed the channel between him and Carol, he
- // will incur the usual CSV delay on any outputs that he can
- // sweep back to his wallet. We'll subtract one block from our
- // current maturity period to assert on the mempool.
- numBlocks := int(forceCloseChan.BlocksTilMaturity - 1)
- ht.MineEmptyBlocks(numBlocks)
-
- // Check that Bob has a pending sweeping tx.
- ht.AssertNumPendingSweeps(bob, 1)
-
- // Mine a block the trigger the sweeping behavior.
- ht.MineEmptyBlocks(1)
-
- // Check that the sweep spends from the mined commitment.
- ht.AssertOutpointInMempool(commitOutpoint)
-
- // Mine one more block to trigger the timeout path.
- ht.MineBlocksAndAssertNumTxes(1, 1)
-
- // Bob's sweeper should now broadcast his second layer sweep
- // due to the CSV on the HTLC timeout output.
- ht.AssertOutpointInMempool(htlcTimeoutOutpoint)
-
- // Next, we'll mine a final block that should confirm the
- // sweeping transactions left.
- ht.MineBlocksAndAssertNumTxes(1, 1)
- }
-
- // Once this transaction has been confirmed, Bob should detect that he
- // no longer has any pending channels.
- ht.AssertNumPendingForceClose(bob, 0)
-
- // Coop close channel, expect no anchors.
- ht.CloseChannel(alice, aliceChanPoint)
-}
-
-// testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the
-// receiver of an HTLC knows the preimage, but wasn't able to settle the HTLC
-// off-chain, then it goes on chain to claim the HTLC uing the HTLC success
-// transaction. In this scenario, the node that sent the outgoing HTLC should
-// extract the preimage from the sweep transaction, and finish settling the
-// HTLC backwards into the route.
-func testMultiHopReceiverChainClaim(ht *lntest.HarnessTest) {
- runMultiHopHtlcClaimTest(ht, runMultiHopReceiverChainClaim)
-}
-
-func runMultiHopReceiverChainClaim(ht *lntest.HarnessTest,
- alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) {
-
- // First, we'll create a three hop network: Alice -> Bob -> Carol, with
- // Carol refusing to actually settle or directly cancel any HTLC's
- // self.
- aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
- ht, alice, bob, false, c, zeroConf,
- )
-
- // For neutrino backend, we need to fund one more UTXO for Carol so she
- // can sweep her outputs.
- if ht.IsNeutrinoBackend() {
- ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
- }
-
- // If this is a taproot channel, then we'll need to make some manual
- // route hints so Alice can actually find a route.
- var routeHints []*lnrpc.RouteHint
- if c == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- routeHints = makeRouteHints(bob, carol, zeroConf)
- }
-
- // With the network active, we'll now add a new hodl invoice at Carol's
- // end. Make sure the cltv expiry delta is large enough, otherwise Bob
- // won't send out the outgoing htlc.
- const invoiceAmt = 100000
- var preimage lntypes.Preimage
- copy(preimage[:], ht.Random32Bytes())
- payHash := preimage.Hash()
- invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
- Value: invoiceAmt,
- CltvExpiry: finalCltvDelta,
- Hash: payHash[:],
- RouteHints: routeHints,
- }
- carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
-
- // Subscribe the invoice.
- stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
-
- // Now that we've created the invoice, we'll send a single payment from
- // Alice to Carol. We won't wait for the response however, as Carol
- // will not immediately settle the payment.
- req := &routerrpc.SendPaymentRequest{
- PaymentRequest: carolInvoice.PaymentRequest,
- TimeoutSeconds: 60,
- FeeLimitMsat: noFeeLimitMsat,
- }
- alice.RPC.SendPayment(req)
-
- // At this point, all 3 nodes should now have an active channel with
- // the created HTLC pending on all of them.
- ht.AssertActiveHtlcs(alice, payHash[:])
- ht.AssertActiveHtlcs(bob, payHash[:])
- ht.AssertActiveHtlcs(carol, payHash[:])
-
- // Wait for carol to mark invoice as accepted. There is a small gap to
- // bridge between adding the htlc to the channel and executing the exit
- // hop logic.
- ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
-
- restartBob := ht.SuspendNode(bob)
-
- // Settle invoice. This will just mark the invoice as settled, as there
- // is no link anymore to remove the htlc from the commitment tx. For
- // this test, it is important to actually settle and not leave the
- // invoice in the accepted state, because without a known preimage, the
- // channel arbitrator won't go to chain.
- carol.RPC.SettleInvoice(preimage[:])
-
- // Increase the fee estimate so that the following force close tx will
- // be cpfp'ed.
- ht.SetFeeEstimate(30000)
-
- // We now advance the block height to the point where Carol will force
- // close her channel with Bob, broadcast the closing tx but keep it
- // unconfirmed.
- numBlocks := padCLTV(uint32(
- invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta,
- ))
-
- // Now we'll mine enough blocks to prompt carol to actually go to the
- // chain in order to sweep her HTLC since the value is high enough.
- ht.MineEmptyBlocks(int(numBlocks))
-
- // At this point, Carol should broadcast her active commitment
- // transaction in order to go to the chain and sweep her HTLC.
- ht.AssertNumTxsInMempool(1)
-
- closingTx := ht.AssertOutpointInMempool(
- ht.OutPointFromChannelPoint(bobChanPoint),
- )
- closingTxid := closingTx.TxHash()
-
- // Carol's anchor should have been offered to her sweeper as she has
- // time-sensitive HTLCs. Assert that we have two anchors - one for the
- // anchor on the local commitment and the other for the anchor on the
- // remote commitment (invalid).
- ht.AssertNumPendingSweeps(carol, 2)
-
- // Confirm the commitment.
- ht.MineBlocksAndAssertNumTxes(1, 1)
-
- // The above mined block will trigger Carol's sweeper to publish the
- // anchor sweeping tx.
- //
- // TODO(yy): should instead cancel the broadcast of the anchor sweeping
- // tx to save fees since we know the force close tx has been confirmed?
- // This is very difficult as it introduces more complicated RBF
- // scenarios, as we are using a wallet utxo, which means any txns using
- // that wallet utxo must pay more fees. On the other hand, there's no
- // way to remove that anchor-CPFP tx from the mempool.
- ht.AssertNumTxsInMempool(1)
-
- // After the force close transaction is mined, Carol should offer her
- // second level HTLC tx to the sweeper, which means we should see two
- // pending inputs now - the anchor and the htlc.
- ht.AssertNumPendingSweeps(carol, 2)
-
- // Restart bob again.
- require.NoError(ht, restartBob())
-
- var expectedTxes int
-
- // After the force close transaction is mined, a series of transactions
- // should be broadcast by Bob and Carol. When Bob notices Carol's
- // second level transaction in the mempool, he will extract the
- // preimage and settle the HTLC back off-chain.
- switch c {
- // We expect to see three txns in the mempool:
- // 1. Carol should broadcast her second level HTLC tx.
- // 2. Carol should broadcast her anchor sweeping tx.
- // 3. Bob should broadcast a sweep tx to sweep his output in the
- // channel with Carol, and in the same sweep tx to sweep his anchor
- // output.
- case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT:
- expectedTxes = 3
- ht.AssertNumPendingSweeps(bob, 2)
-
- // We expect to see two txns in the mempool:
- // 1. Carol should broadcast her second level HTLC tx.
- // 2. Carol should broadcast her anchor sweeping tx.
- // Bob would offer his anchor output to his sweeper, but it cannot be
- // swept due to it being uneconomical. Bob's commit output can't be
- // swept yet as he's incurring an additional CLTV from being the
- // channel initiator of a script-enforced leased channel.
- case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE:
- expectedTxes = 2
- ht.AssertNumPendingSweeps(bob, 1)
-
- default:
- ht.Fatalf("unhandled commitment type %v", c)
- }
-
- // Mine one block to trigger the sweeper to sweep.
- ht.MineEmptyBlocks(1)
-
- // All transactions should be spending from the commitment transaction.
- txes := ht.GetNumTxsFromMempool(expectedTxes)
- ht.AssertAllTxesSpendFrom(txes, closingTxid)
-
- // We'll now mine an additional block which should confirm both the
- // second layer transactions.
- ht.MineBlocksAndAssertNumTxes(1, expectedTxes)
-
- // Carol's pending channel report should now show two outputs under
- // limbo: her commitment output, as well as the second-layer claim
- // output, and the pending HTLC should also now be in stage 2.
- ht.AssertNumHTLCsAndStage(carol, bobChanPoint, 1, 2)
-
- // Once the second-level transaction confirmed, Bob should have
- // extracted the preimage from the chain, and sent it back to Alice,
- // clearing the HTLC off-chain.
- ht.AssertNumActiveHtlcs(alice, 0)
-
- // If we mine 4 additional blocks, then Carol can sweep the second
- // level HTLC output once the CSV expires.
- ht.MineEmptyBlocks(defaultCSV - 1)
-
- // Assert Carol has the pending HTLC sweep.
- ht.AssertNumPendingSweeps(carol, 1)
-
- // Mine one block to trigger the sweeper to sweep.
- ht.MineEmptyBlocks(1)
-
- // We should have a new transaction in the mempool.
- ht.AssertNumTxsInMempool(1)
-
- // Finally, if we mine an additional block to confirm Carol's second
- // level success transaction. Carol should not show a pending channel
- // in her report afterwards.
- ht.MineBlocksAndAssertNumTxes(1, 1)
- ht.AssertNumPendingForceClose(carol, 0)
-
- // The invoice should show as settled for Carol, indicating that it was
- // swept on-chain.
- ht.AssertInvoiceSettled(carol, carolInvoice.PaymentAddr)
-
- // Finally, check that the Alice's payment is correctly marked
- // succeeded.
- ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
-
- if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- // Bob still has his commit output to sweep to since he
- // incurred an additional CLTV from being the channel initiator
- // of a script-enforced leased channel, regardless of whether
- // he forced closed the channel or not.
- pendingChanResp := bob.RPC.PendingChannels()
-
- require.Len(ht, pendingChanResp.PendingForceClosingChannels, 1)
- forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
- require.Positive(ht, forceCloseChan.LimboBalance)
- require.Positive(ht, forceCloseChan.BlocksTilMaturity)
-
- // TODO: Bob still shows a pending HTLC at this point when he
- // shouldn't, as he already extracted the preimage from Carol's
- // claim.
- // require.Len(t.t, forceCloseChan.PendingHtlcs, 0)
-
- // Mine enough blocks for Bob's commit output's CLTV to expire
- // and sweep it.
- numBlocks := int(forceCloseChan.BlocksTilMaturity)
- ht.MineEmptyBlocks(numBlocks)
-
- // Bob should have two pending inputs to be swept, the commit
- // output and the anchor output.
- ht.AssertNumPendingSweeps(bob, 2)
- ht.MineEmptyBlocks(1)
-
- commitOutpoint := wire.OutPoint{Hash: closingTxid, Index: 3}
- ht.AssertOutpointInMempool(commitOutpoint)
- ht.MineBlocksAndAssertNumTxes(1, 1)
- }
-
- ht.AssertNumPendingForceClose(bob, 0)
-
- // We'll close out the channel between Alice and Bob, then shutdown
- // carol to conclude the test.
- ht.CloseChannel(alice, aliceChanPoint)
-}
-
-// testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC
-// scenario, if the node that extended the HTLC to the final node closes their
-// commitment on-chain early, then it eventually recognizes this HTLC as one
-// that's timed out. At this point, the node should timeout the HTLC using the
-// HTLC timeout transaction, then cancel it backwards as normal.
-func testMultiHopLocalForceCloseOnChainHtlcTimeout(ht *lntest.HarnessTest) {
- runMultiHopHtlcClaimTest(
- ht, runMultiHopLocalForceCloseOnChainHtlcTimeout,
- )
-}
-
-func runMultiHopLocalForceCloseOnChainHtlcTimeout(ht *lntest.HarnessTest,
- alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) {
-
- // First, we'll create a three hop network: Alice -> Bob -> Carol, with
- // Carol refusing to actually settle or directly cancel any HTLC's
- // self.
- aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
- ht, alice, bob, true, c, zeroConf,
- )
-
- // With our channels set up, we'll then send a single HTLC from Alice
- // to Carol. As Carol is in hodl mode, she won't settle this HTLC which
- // opens up the base for out tests.
- const htlcAmt = btcutil.Amount(300_000)
-
- // If this is a taproot channel, then we'll need to make some manual
- // route hints so Alice can actually find a route.
- var routeHints []*lnrpc.RouteHint
- if c == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- routeHints = makeRouteHints(bob, carol, zeroConf)
- }
-
- // We'll now send a single HTLC across our multi-hop network.
- carolPubKey := carol.PubKey[:]
- payHash := ht.Random32Bytes()
- req := &routerrpc.SendPaymentRequest{
- Dest: carolPubKey,
- Amt: int64(htlcAmt),
- PaymentHash: payHash,
- FinalCltvDelta: finalCltvDelta,
- TimeoutSeconds: 60,
- FeeLimitMsat: noFeeLimitMsat,
- RouteHints: routeHints,
- }
- alice.RPC.SendPayment(req)
-
- // Once the HTLC has cleared, all channels in our mini network should
- // have the it locked in.
- ht.AssertActiveHtlcs(alice, payHash)
- ht.AssertActiveHtlcs(bob, payHash)
- ht.AssertActiveHtlcs(carol, payHash)
-
- // blocksMined records how many blocks have mined after the creation of
- // the invoice so it can be used to calculate how many more blocks need
- // to be mined to trigger a force close later on.
- var blocksMined uint32
-
- // Now that all parties have the HTLC locked in, we'll immediately
- // force close the Bob -> Carol channel. This should trigger contract
- // resolution mode for both of them.
- stream, _ := ht.CloseChannelAssertPending(bob, bobChanPoint, true)
- closeTx := ht.AssertStreamChannelForceClosed(
- bob, bobChanPoint, true, stream,
- )
-
- // Increase the blocks mined. At the step
- // AssertStreamChannelForceClosed mines one block.
- blocksMined++
-
- // The channel close has anchors, we should expect to see both Bob and
- // Carol has a pending sweep request for the anchor sweep.
- ht.AssertNumPendingSweeps(carol, 1)
- ht.AssertNumPendingSweeps(bob, 1)
-
- // Mine a block to confirm Bob's anchor sweep - Carol's anchor sweep
- // won't succeed because it's not used for CPFP, so there's no wallet
- // utxo used, resulting it to be uneconomical.
- ht.MineBlocksAndAssertNumTxes(1, 1)
- blocksMined++
-
- htlcOutpoint := wire.OutPoint{Hash: closeTx, Index: 2}
- bobCommitOutpoint := wire.OutPoint{Hash: closeTx, Index: 3}
-
- // Before the HTLC times out, we'll need to assert that Bob broadcasts
- // a sweep transaction for his commit output. Note that if the channel
- // has a script-enforced lease, then Bob will have to wait for an
- // additional CLTV before sweeping it.
- if c != lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- // The sweep is broadcast on the block immediately before the
- // CSV expires and the commitment was already mined inside
- // AssertStreamChannelForceClosed(), so mine one block less
- // than defaultCSV in order to perform mempool assertions.
- ht.MineEmptyBlocks(int(defaultCSV - blocksMined))
- blocksMined = defaultCSV
-
- // Assert Bob has the sweep and trigger it.
- ht.AssertNumPendingSweeps(bob, 1)
- ht.MineEmptyBlocks(1)
- blocksMined++
-
- commitSweepTx := ht.AssertOutpointInMempool(
- bobCommitOutpoint,
- )
- txid := commitSweepTx.TxHash()
- block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
- ht.AssertTxInBlock(block, txid)
-
- blocksMined++
- }
-
- // We'll now mine enough blocks for the HTLC to expire. After this, Bob
- // should hand off the now expired HTLC output to the utxo nursery.
- numBlocks := padCLTV(uint32(finalCltvDelta) -
- lncfg.DefaultOutgoingBroadcastDelta)
- ht.MineEmptyBlocks(int(numBlocks - blocksMined))
-
- // Bob's pending channel report should show that he has a single HTLC
- // that's now in stage one.
- ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 1)
-
- // Bob should have a pending sweep request.
- ht.AssertNumPendingSweeps(bob, 1)
-
- // Mine one block to trigger Bob's sweeper to sweep it.
- ht.MineEmptyBlocks(1)
-
- // We should also now find a transaction in the mempool, as Bob should
- // have broadcast his second layer timeout transaction.
- timeoutTx := ht.AssertOutpointInMempool(htlcOutpoint).TxHash()
-
- // Next, we'll mine an additional block. This should serve to confirm
- // the second layer timeout transaction.
- block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
- ht.AssertTxInBlock(block, timeoutTx)
-
- // With the second layer timeout transaction confirmed, Bob should have
- // canceled backwards the HTLC that carol sent.
- ht.AssertNumActiveHtlcs(bob, 0)
-
- // Additionally, Bob should now show that HTLC as being advanced to the
- // second stage.
- ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 2)
-
- // Bob should now broadcast a transaction that sweeps certain inputs
- // depending on the commitment type. We'll need to mine some blocks
- // before the broadcast is possible.
- resp := bob.RPC.PendingChannels()
-
- require.Len(ht, resp.PendingForceClosingChannels, 1)
- forceCloseChan := resp.PendingForceClosingChannels[0]
- require.Len(ht, forceCloseChan.PendingHtlcs, 1)
- pendingHtlc := forceCloseChan.PendingHtlcs[0]
- require.Positive(ht, pendingHtlc.BlocksTilMaturity)
- numBlocks = uint32(pendingHtlc.BlocksTilMaturity)
-
- ht.MineEmptyBlocks(int(numBlocks))
-
- var numExpected int
-
- // Now that the CSV/CLTV timelock has expired, the transaction should
- // either only sweep the HTLC timeout transaction, or sweep both the
- // HTLC timeout transaction and Bob's commit output depending on the
- // commitment type.
- if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- // Assert the expected number of pending sweeps are found.
- sweeps := ht.AssertNumPendingSweeps(bob, 2)
-
- numExpected = 1
- if sweeps[0].DeadlineHeight != sweeps[1].DeadlineHeight {
- numExpected = 2
- }
- } else {
- ht.AssertNumPendingSweeps(bob, 1)
- numExpected = 1
- }
-
- // Mine a block to trigger the sweep.
- ht.MineEmptyBlocks(1)
-
- // Assert the sweeping tx is found in the mempool.
- htlcTimeoutOutpoint := wire.OutPoint{Hash: timeoutTx, Index: 0}
- ht.AssertOutpointInMempool(htlcTimeoutOutpoint)
-
- // Mine a block to confirm the sweep.
- ht.MineBlocksAndAssertNumTxes(1, numExpected)
-
- // At this point, Bob should no longer show any channels as pending
- // close.
- ht.AssertNumPendingForceClose(bob, 0)
-
- // Coop close, no anchors.
- ht.CloseChannel(alice, aliceChanPoint)
-}
-
-// testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a
-// multi-hop HTLC, and the final destination of the HTLC force closes the
-// channel, then we properly timeout the HTLC directly on *their* commitment
-// transaction once the timeout has expired. Once we sweep the transaction, we
-// should also cancel back the initial HTLC.
-func testMultiHopRemoteForceCloseOnChainHtlcTimeout(ht *lntest.HarnessTest) {
- runMultiHopHtlcClaimTest(
- ht, runMultiHopRemoteForceCloseOnChainHtlcTimeout,
- )
-}
-
-func runMultiHopRemoteForceCloseOnChainHtlcTimeout(ht *lntest.HarnessTest,
- alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) {
-
- // First, we'll create a three hop network: Alice -> Bob -> Carol, with
- // Carol refusing to actually settle or directly cancel any HTLC's
- // self.
- aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
- ht, alice, bob, true, c, zeroConf,
- )
-
- // With our channels set up, we'll then send a single HTLC from Alice
- // to Carol. As Carol is in hodl mode, she won't settle this HTLC which
- // opens up the base for out tests.
- const htlcAmt = btcutil.Amount(30000)
-
- // If this is a taproot channel, then we'll need to make some manual
- // route hints so Alice can actually find a route.
- var routeHints []*lnrpc.RouteHint
- if c == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- routeHints = makeRouteHints(bob, carol, zeroConf)
- }
-
- // We'll now send a single HTLC across our multi-hop network.
- var preimage lntypes.Preimage
- copy(preimage[:], ht.Random32Bytes())
- payHash := preimage.Hash()
- invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
- Value: int64(htlcAmt),
- CltvExpiry: finalCltvDelta,
- Hash: payHash[:],
- RouteHints: routeHints,
- }
- carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
-
- // Subscribe the invoice.
- stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
-
- req := &routerrpc.SendPaymentRequest{
- PaymentRequest: carolInvoice.PaymentRequest,
- TimeoutSeconds: 60,
- FeeLimitMsat: noFeeLimitMsat,
- }
- alice.RPC.SendPayment(req)
-
- // blocksMined records how many blocks have mined after the creation of
- // the invoice so it can be used to calculate how many more blocks need
- // to be mined to trigger a force close later on.
- var blocksMined uint32
-
- // Once the HTLC has cleared, all the nodes in our mini network should
- // show that the HTLC has been locked in.
- ht.AssertActiveHtlcs(alice, payHash[:])
- ht.AssertActiveHtlcs(bob, payHash[:])
- ht.AssertActiveHtlcs(carol, payHash[:])
-
- // At this point, we'll now instruct Carol to force close the
- // transaction. This will let us exercise that Bob is able to sweep the
- // expired HTLC on Carol's version of the commitment transaction.
- closeStream, _ := ht.CloseChannelAssertPending(
- carol, bobChanPoint, true,
- )
-
- // For anchor channels, the anchor won't be used for CPFP because
- // channel arbitrator thinks Carol doesn't have preimage for her
- // incoming HTLC on the commitment transaction Bob->Carol. Although
- // Carol created this invoice, because it's a hold invoice, the
- // preimage won't be generated automatically.
- closeTx := ht.AssertStreamChannelForceClosed(
- carol, bobChanPoint, true, closeStream,
- )
-
- // Increase the blocks mined. At this step
- // AssertStreamChannelForceClosed mines one block.
- blocksMined++
-
- // At this point, Bob should have a pending force close channel as
- // Carol has gone directly to chain.
- ht.AssertNumPendingForceClose(bob, 1)
-
- var expectedTxes int
- switch c {
- // Bob can sweep his commit and anchor outputs immediately. Carol will
- // also offer her anchor to her sweeper.
- case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT:
- ht.AssertNumPendingSweeps(bob, 2)
- ht.AssertNumPendingSweeps(carol, 1)
-
- // We expect to see only one sweeping tx to be published from
- // Bob, which sweeps his commit and anchor outputs in the same
- // tx. For Carol, since her anchor is not used for CPFP, it'd
- // be uneconomical to sweep so it will fail.
- expectedTxes = 1
-
- // Bob can't sweep his commit output yet as he was the initiator of a
- // script-enforced leased channel, so he'll always incur the additional
- // CLTV. He can still offer his anchor output to his sweeper however.
- case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE:
- ht.AssertNumPendingSweeps(bob, 1)
- ht.AssertNumPendingSweeps(carol, 1)
-
- // We expect to see only no sweeping txns to be published,
- // neither Bob's or Carol's anchor sweep can succeed due to
- // it's uneconomical.
- expectedTxes = 0
-
- default:
- ht.Fatalf("unhandled commitment type %v", c)
- }
-
- // Mine one block to trigger the sweeps.
- ht.MineEmptyBlocks(1)
- blocksMined++
-
- // We now mine a block to clear up the mempool.
- ht.MineBlocksAndAssertNumTxes(1, expectedTxes)
- blocksMined++
-
- // Next, we'll mine enough blocks for the HTLC to expire. At this
- // point, Bob should hand off the output to his internal utxo nursery,
- // which will broadcast a sweep transaction.
- numBlocks := padCLTV(uint32(finalCltvDelta) -
- lncfg.DefaultOutgoingBroadcastDelta)
- ht.MineEmptyBlocks(int(numBlocks - blocksMined))
-
- // If we check Bob's pending channel report, it should show that he has
- // a single HTLC that's now in the second stage, as it skipped the
- // initial first stage since this is a direct HTLC.
- ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 2)
-
- // We need to generate an additional block to expire the CSV 1.
- ht.MineEmptyBlocks(1)
-
- // For script-enforced leased channels, Bob has failed to sweep his
- // anchor output before, so it's still pending.
- if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- ht.AssertNumPendingSweeps(bob, 2)
- } else {
- // Bob should have a pending sweep request.
- ht.AssertNumPendingSweeps(bob, 1)
- }
-
- // Mine a block to trigger the sweeper to sweep it.
- ht.MineEmptyBlocks(1)
-
- // Bob's sweeping transaction should now be found in the mempool at
- // this point.
- sweepTx := ht.AssertNumTxsInMempool(1)[0]
-
- // If we mine an additional block, then this should confirm Bob's
- // transaction which sweeps the direct HTLC output.
- block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
- ht.AssertTxInBlock(block, sweepTx)
-
- // Now that the sweeping transaction has been confirmed, Bob should
- // cancel back that HTLC. As a result, Alice should not know of any
- // active HTLC's.
- ht.AssertNumActiveHtlcs(alice, 0)
-
- // Now we'll check Bob's pending channel report. Since this was Carol's
- // commitment, he doesn't have to wait for any CSV delays, but he may
- // still need to wait for a CLTV on his commit output to expire
- // depending on the commitment type.
- if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- resp := bob.RPC.PendingChannels()
-
- require.Len(ht, resp.PendingForceClosingChannels, 1)
- forceCloseChan := resp.PendingForceClosingChannels[0]
- require.Positive(ht, forceCloseChan.BlocksTilMaturity)
-
- numBlocks := int(forceCloseChan.BlocksTilMaturity)
- ht.MineEmptyBlocks(numBlocks)
-
- // Assert the commit output has been offered to the sweeper.
- // Bob should have two pending sweep requests - one for the
- // commit output and one for the anchor output.
- ht.AssertNumPendingSweeps(bob, 2)
-
- // Mine a block to trigger the sweep.
- ht.MineEmptyBlocks(1)
-
- bobCommitOutpoint := wire.OutPoint{Hash: closeTx, Index: 3}
- bobCommitSweep := ht.AssertOutpointInMempool(
- bobCommitOutpoint,
- )
- bobCommitSweepTxid := bobCommitSweep.TxHash()
- block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
- ht.AssertTxInBlock(block, bobCommitSweepTxid)
- }
- ht.AssertNumPendingForceClose(bob, 0)
-
- // While we're here, we assert that our expired invoice's state is
- // correctly updated, and can no longer be settled.
- ht.AssertInvoiceState(stream, lnrpc.Invoice_CANCELED)
-
- // We'll close out the test by closing the channel from Alice to Bob,
- // and then shutting down the new node we created as its no longer
- // needed. Coop close, no anchors.
- ht.CloseChannel(alice, aliceChanPoint)
-}
-
-// testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if
-// we force close a channel with an incoming HTLC, and later find out the
-// preimage via the witness beacon, we properly settle the HTLC on-chain using
-// the HTLC success transaction in order to ensure we don't lose any funds.
-func testMultiHopHtlcLocalChainClaim(ht *lntest.HarnessTest) {
- runMultiHopHtlcClaimTest(ht, runMultiHopHtlcLocalChainClaim)
-}
-
-func runMultiHopHtlcLocalChainClaim(ht *lntest.HarnessTest,
- alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) {
-
- // First, we'll create a three hop network: Alice -> Bob -> Carol, with
- // Carol refusing to actually settle or directly cancel any HTLC's
- // self.
- aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
- ht, alice, bob, false, c, zeroConf,
- )
-
- // For neutrino backend, we need to fund one more UTXO for Carol so she
- // can sweep her outputs.
- if ht.IsNeutrinoBackend() {
- ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
- }
-
- // If this is a taproot channel, then we'll need to make some manual
- // route hints so Alice can actually find a route.
- var routeHints []*lnrpc.RouteHint
- if c == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- routeHints = makeRouteHints(bob, carol, zeroConf)
- }
-
- // With the network active, we'll now add a new hodl invoice at Carol's
- // end. Make sure the cltv expiry delta is large enough, otherwise Bob
- // won't send out the outgoing htlc.
- const invoiceAmt = 100000
- var preimage lntypes.Preimage
- copy(preimage[:], ht.Random32Bytes())
- payHash := preimage.Hash()
- invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
- Value: invoiceAmt,
- CltvExpiry: finalCltvDelta,
- Hash: payHash[:],
- RouteHints: routeHints,
- }
- carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
-
- // Subscribe the invoice.
- stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
-
- // Now that we've created the invoice, we'll send a single payment from
- // Alice to Carol. We won't wait for the response however, as Carol
- // will not immediately settle the payment.
- req := &routerrpc.SendPaymentRequest{
- PaymentRequest: carolInvoice.PaymentRequest,
- TimeoutSeconds: 60,
- FeeLimitMsat: noFeeLimitMsat,
- }
- alice.RPC.SendPayment(req)
-
- // At this point, all 3 nodes should now have an active channel with
- // the created HTLC pending on all of them.
- ht.AssertActiveHtlcs(alice, payHash[:])
- ht.AssertActiveHtlcs(bob, payHash[:])
- ht.AssertActiveHtlcs(carol, payHash[:])
-
- // Wait for carol to mark invoice as accepted. There is a small gap to
- // bridge between adding the htlc to the channel and executing the exit
- // hop logic.
- ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
-
- // blocksMined records how many blocks have mined after the creation of
- // the invoice so it can be used to calculate how many more blocks need
- // to be mined to trigger a force close later on.
- var blocksMined uint32
-
- // At this point, Bob decides that he wants to exit the channel
- // immediately, so he force closes his commitment transaction.
- closeStream, _ := ht.CloseChannelAssertPending(
- bob, aliceChanPoint, true,
- )
-
- // For anchor channels, the anchor won't be used for CPFP as there's no
- // deadline pressure for Bob on the channel Alice->Bob at the moment.
- // For Bob's local commitment tx, there's only one incoming HTLC which
- // he doesn't have the preimage yet. Thus this anchor won't be
- // force-swept.
- hasAnchorSweep := false
- bobForceClose := ht.AssertStreamChannelForceClosed(
- bob, aliceChanPoint, hasAnchorSweep, closeStream,
- )
-
- // Increase the blocks mined. At this step
- // AssertStreamChannelForceClosed mines one block.
- blocksMined++
-
- var expectedTxes int
- switch c {
- // Alice will sweep her commitment and anchor output immediately. Bob
- // will also offer his anchor to his sweeper.
- case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT:
- ht.AssertNumPendingSweeps(alice, 2)
- ht.AssertNumPendingSweeps(bob, 1)
-
- // We expect to see only one sweeping tx to be published from
- // Alice, which sweeps her commit and anchor outputs in the
- // same tx. For Bob, since his anchor is not used for CPFP,
- // it'd be uneconomical to sweep so it will fail.
- expectedTxes = 1
-
- // Alice will offer her anchor output to her sweeper. Her commitment
- // output cannot be swept yet as it has incurred an additional CLTV due
- // to being the initiator of a script-enforced leased channel.
- case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE:
- ht.AssertNumPendingSweeps(alice, 1)
- ht.AssertNumPendingSweeps(bob, 1)
-
- // We expect to see only no sweeping txns to be published,
- // neither Alice's or Bob's anchor sweep can succeed due to
- // it's uneconomical.
- expectedTxes = 0
-
- default:
- ht.Fatalf("unhandled commitment type %v", c)
- }
-
- // Mine a block to trigger the sweeps.
- ht.MineEmptyBlocks(1)
- blocksMined++
-
- // Assert the expected num of txns are found in the mempool.
- ht.AssertNumTxsInMempool(expectedTxes)
-
- // Mine a block to clean up the mempool for the rest of the test.
- ht.MineBlocksAndAssertNumTxes(1, expectedTxes)
- blocksMined++
-
- // Suspend Bob to force Carol to go to chain.
- restartBob := ht.SuspendNode(bob)
-
- // Settle invoice. This will just mark the invoice as settled, as there
- // is no link anymore to remove the htlc from the commitment tx. For
- // this test, it is important to actually settle and not leave the
- // invoice in the accepted state, because without a known preimage, the
- // channel arbitrator won't go to chain.
- carol.RPC.SettleInvoice(preimage[:])
-
- // We now advance the block height to the point where Carol will force
- // close her channel with Bob, broadcast the closing tx but keep it
- // unconfirmed.
- numBlocks := padCLTV(uint32(invoiceReq.CltvExpiry -
- lncfg.DefaultIncomingBroadcastDelta))
- ht.MineEmptyBlocks(int(numBlocks - blocksMined))
-
- // Carol's commitment transaction should now be in the mempool.
- ht.AssertNumTxsInMempool(1)
-
- // Look up the closing transaction. It should be spending from the
- // funding transaction,
- closingTx := ht.AssertOutpointInMempool(
- ht.OutPointFromChannelPoint(bobChanPoint),
- )
- closingTxid := closingTx.TxHash()
-
- // Mine a block that should confirm the commit tx.
- block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
- ht.AssertTxInBlock(block, closingTxid)
-
- // After the force close transaction is mined, Carol should offer her
- // second-level success HTLC tx and anchor to the sweeper.
- ht.AssertNumPendingSweeps(carol, 2)
-
- // Restart bob again.
- require.NoError(ht, restartBob())
-
- // Lower the fee rate so Bob's two anchor outputs are economical to
- // be swept in one tx.
- ht.SetFeeEstimate(chainfee.FeePerKwFloor)
-
- // After the force close transaction is mined, transactions will be
- // broadcast by both Bob and Carol.
- switch c {
- // Carol will broadcast her sweeping txns and Bob will sweep his
- // commitment and anchor outputs, we'd expect to see three txns,
- // - Carol's second level HTLC transaction.
- // - Carol's anchor sweeping txns since it's used for CPFP.
- // - Bob's sweep tx spending his commitment output, and two anchor
- // outputs, one from channel Alice to Bob and the other from channel
- // Bob to Carol.
- case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT:
- ht.AssertNumPendingSweeps(bob, 3)
- expectedTxes = 3
-
- // Carol will broadcast her sweeping txns and Bob will sweep his
- // anchor outputs. Bob can't sweep his commitment output yet as it has
- // incurred an additional CLTV due to being the initiator of a
- // script-enforced leased channel:
- // - Carol's second level HTLC transaction.
- // - Carol's anchor sweeping txns since it's used for CPFP.
- // - Bob's sweep tx spending his two anchor outputs, one from channel
- // Alice to Bob and the other from channel Bob to Carol.
- case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE:
- ht.AssertNumPendingSweeps(bob, 2)
- expectedTxes = 3
-
- default:
- ht.Fatalf("unhandled commitment type %v", c)
- }
-
- // Mine a block to trigger the sweeps.
- ht.MineEmptyBlocks(1)
-
- // Assert transactions can be found in the mempool.
- ht.AssertNumTxsInMempool(expectedTxes)
-
- // At this point we suspend Alice to make sure she'll handle the
- // on-chain settle after a restart.
- restartAlice := ht.SuspendNode(alice)
-
- // Mine a block to confirm the expected transactions (+ the coinbase).
- ht.MineBlocksAndAssertNumTxes(1, expectedTxes)
-
- // For a channel of the anchor type, we will subtract one block
- // from the default CSV, as the Sweeper will handle the input, and the
- // Sweeper sweeps the input as soon as the lock expires.
- secondLevelMaturity := uint32(defaultCSV - 1)
-
- // Keep track of the second level tx maturity.
- carolSecondLevelCSV := secondLevelMaturity
-
- // When Bob notices Carol's second level transaction in the block, he
- // will extract the preimage and broadcast a second level tx to claim
- // the HTLC in his (already closed) channel with Alice.
- ht.AssertNumPendingSweeps(bob, 1)
-
- // Mine a block to trigger the sweep of the second level tx.
- ht.MineEmptyBlocks(1)
- carolSecondLevelCSV--
-
- // Check Bob's second level tx.
- bobSecondLvlTx := ht.GetNumTxsFromMempool(1)[0]
-
- // It should spend from the commitment in the channel with Alice.
- ht.AssertTxSpendFrom(bobSecondLvlTx, bobForceClose)
-
- // At this point, Bob should have broadcast his second layer success
- // transaction, and should have sent it to the nursery for incubation.
- ht.AssertNumHTLCsAndStage(bob, aliceChanPoint, 1, 1)
-
- // The channel between Bob and Carol will still be pending force close
- // if this is a leased channel. In that case, we'd also check the HTLC
- // stages are correct in that channel.
- if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- ht.AssertNumPendingForceClose(bob, 2)
- ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 1)
- } else {
- ht.AssertNumPendingForceClose(bob, 1)
- }
-
- // We'll now mine a block which should confirm Bob's second layer
- // transaction.
- ht.MineBlocksAndAssertNumTxes(1, 1)
-
- // Keep track of Bob's second level maturity, and decrement our track
- // of Carol's.
- bobSecondLevelCSV := secondLevelMaturity
- carolSecondLevelCSV--
-
- // Now that the preimage from Bob has hit the chain, restart Alice to
- // ensure she'll pick it up.
- require.NoError(ht, restartAlice())
-
- // If we then mine 1 additional blocks, Carol's second level tx should
- // mature, and she can pull the funds from it with a sweep tx.
- ht.MineEmptyBlocks(int(carolSecondLevelCSV))
- bobSecondLevelCSV -= carolSecondLevelCSV
-
- // Carol should have one a sweep request for her second level tx.
- ht.AssertNumPendingSweeps(carol, 1)
-
- // Mine a block to trigger the sweep.
- ht.MineEmptyBlocks(1)
- bobSecondLevelCSV--
-
- // Carol's sweep tx should be broadcast.
- carolSweep := ht.AssertNumTxsInMempool(1)[0]
-
- // Bob should offer his second level tx to his sweeper.
- ht.AssertNumPendingSweeps(bob, 1)
-
- // Mining one additional block, Bob's second level tx is mature, and he
- // can sweep the output.
- block = ht.MineBlocksAndAssertNumTxes(bobSecondLevelCSV, 1)[0]
- ht.AssertTxInBlock(block, carolSweep)
-
- bobSweep := ht.GetNumTxsFromMempool(1)[0]
- bobSweepTxid := bobSweep.TxHash()
-
- // When we mine one additional block, that will confirm Bob's sweep.
- // Now Bob should have no pending channels anymore, as this just
- // resolved it by the confirmation of the sweep transaction.
- block = ht.MineBlocksAndAssertNumTxes(1, 1)[0]
- ht.AssertTxInBlock(block, bobSweepTxid)
-
- // With the script-enforced lease commitment type, Alice and Bob still
- // haven't been able to sweep their respective commit outputs due to the
- // additional CLTV. We'll need to mine enough blocks for the timelock to
- // expire and prompt their sweep.
- if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- for _, node := range []*node.HarnessNode{alice, bob} {
- ht.AssertNumPendingForceClose(node, 1)
- }
-
- // Due to the way the test is set up, Alice and Bob share the
- // same CLTV for their commit outputs even though it's enforced
- // on different channels (Alice-Bob and Bob-Carol).
- resp := alice.RPC.PendingChannels()
- require.Len(ht, resp.PendingForceClosingChannels, 1)
- forceCloseChan := resp.PendingForceClosingChannels[0]
- require.Positive(ht, forceCloseChan.BlocksTilMaturity)
-
- // Mine enough blocks for the timelock to expire.
- numBlocks := uint32(forceCloseChan.BlocksTilMaturity)
- ht.MineEmptyBlocks(int(numBlocks))
-
- // Both Alice and Bob should now offer their commit outputs to
- // the sweeper. For Alice, she still has her anchor output as
- // pending sweep as it's not used for CPFP, thus it's
- // uneconomical to sweep it alone.
- ht.AssertNumPendingSweeps(alice, 2)
- ht.AssertNumPendingSweeps(bob, 1)
-
- // Mine a block to trigger the sweeps.
- ht.MineEmptyBlocks(1)
-
- // Both Alice and Bob show broadcast their commit sweeps.
- aliceCommitOutpoint := wire.OutPoint{
- Hash: bobForceClose, Index: 3,
- }
- ht.AssertOutpointInMempool(
- aliceCommitOutpoint,
- ).TxHash()
- bobCommitOutpoint := wire.OutPoint{Hash: closingTxid, Index: 3}
- ht.AssertOutpointInMempool(
- bobCommitOutpoint,
- ).TxHash()
-
- // Confirm their sweeps.
- ht.MineBlocksAndAssertNumTxes(1, 2)
- }
-
- // All nodes should show zero pending and open channels.
- for _, node := range []*node.HarnessNode{alice, bob, carol} {
- ht.AssertNumPendingForceClose(node, 0)
- ht.AssertNodeNumChannels(node, 0)
- }
-
- // Finally, check that the Alice's payment is correctly marked
- // succeeded.
- ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
-}
-
-// testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario,
-// if the remote party goes to chain while we have an incoming HTLC, then when
-// we found out the preimage via the witness beacon, we properly settle the
-// HTLC directly on-chain using the preimage in order to ensure that we don't
-// lose any funds.
-func testMultiHopHtlcRemoteChainClaim(ht *lntest.HarnessTest) {
- runMultiHopHtlcClaimTest(ht, runMultiHopHtlcRemoteChainClaim)
-}
-
-func runMultiHopHtlcRemoteChainClaim(ht *lntest.HarnessTest,
- alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) {
-
- // First, we'll create a three hop network: Alice -> Bob -> Carol, with
- // Carol refusing to actually settle or directly cancel any HTLC's
- // self.
- aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
- ht, alice, bob, false, c, zeroConf,
- )
-
- // If this is a taproot channel, then we'll need to make some manual
- // route hints so Alice can actually find a route.
- var routeHints []*lnrpc.RouteHint
- if c == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- routeHints = makeRouteHints(bob, carol, zeroConf)
- }
-
- // With the network active, we'll now add a new hodl invoice at Carol's
- // end. Make sure the cltv expiry delta is large enough, otherwise Bob
- // won't send out the outgoing htlc.
- const invoiceAmt = 100000
- var preimage lntypes.Preimage
- copy(preimage[:], ht.Random32Bytes())
- payHash := preimage.Hash()
- invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
- Value: invoiceAmt,
- CltvExpiry: finalCltvDelta,
- Hash: payHash[:],
- RouteHints: routeHints,
- }
- carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
-
- // Subscribe the invoice.
- stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
-
- // Now that we've created the invoice, we'll send a single payment from
- // Alice to Carol. We won't wait for the response however, as Carol
- // will not immediately settle the payment.
- req := &routerrpc.SendPaymentRequest{
- PaymentRequest: carolInvoice.PaymentRequest,
- TimeoutSeconds: 60,
- FeeLimitMsat: noFeeLimitMsat,
- }
- alice.RPC.SendPayment(req)
-
- // At this point, all 3 nodes should now have an active channel with
- // the created HTLC pending on all of them.
- ht.AssertActiveHtlcs(alice, payHash[:])
- ht.AssertActiveHtlcs(bob, payHash[:])
- ht.AssertActiveHtlcs(carol, payHash[:])
-
- // Wait for carol to mark invoice as accepted. There is a small gap to
- // bridge between adding the htlc to the channel and executing the exit
- // hop logic.
- ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
-
- // blocksMined records how many blocks have mined after the creation of
- // the invoice so it can be used to calculate how many more blocks need
- // to be mined to trigger a force close later on.
- var blocksMined int
-
- // Lower the fee rate so Bob's two anchor outputs are economical to
- // be swept in one tx.
- ht.SetFeeEstimate(chainfee.FeePerKwFloor)
-
- // Next, Alice decides that she wants to exit the channel, so she'll
- // immediately force close the channel by broadcast her commitment
- // transaction.
- closeStream, _ := ht.CloseChannelAssertPending(
- alice, aliceChanPoint, true,
- )
- aliceForceClose := ht.AssertStreamChannelForceClosed(
- alice, aliceChanPoint, true, closeStream,
- )
-
- // Increase the blocks mined. At this step
- // AssertStreamChannelForceClosed mines one block.
- blocksMined++
-
- // Wait for the channel to be marked pending force close.
- ht.AssertChannelPendingForceClose(alice, aliceChanPoint)
-
- // After AssertStreamChannelForceClosed returns, it has mined a block
- // so now bob will attempt to redeem his anchor output. Check the
- // anchor is offered to the sweeper.
- ht.AssertNumPendingSweeps(bob, 1)
- ht.AssertNumPendingSweeps(alice, 1)
-
- // Mine enough blocks for Alice to sweep her funds from the force
- // closed channel. AssertStreamChannelForceClosed() already mined a
- // block containing the commitment tx and the commit sweep tx will be
- // broadcast immediately before it can be included in a block, so mine
- // one less than defaultCSV in order to perform mempool assertions.
- if c != lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- ht.MineEmptyBlocks(defaultCSV - blocksMined)
- blocksMined = defaultCSV
-
- // Alice should now sweep her funds.
- ht.AssertNumPendingSweeps(alice, 2)
-
- // Mine a block to trigger the sweep.
- ht.MineEmptyBlocks(1)
- blocksMined++
-
- // Mine Alice's commit sweeping tx.
- ht.MineBlocksAndAssertNumTxes(1, 1)
- blocksMined++
- }
-
- // Suspend bob, so Carol is forced to go on chain.
- restartBob := ht.SuspendNode(bob)
-
- // Settle invoice. This will just mark the invoice as settled, as there
- // is no link anymore to remove the htlc from the commitment tx. For
- // this test, it is important to actually settle and not leave the
- // invoice in the accepted state, because without a known preimage, the
- // channel arbitrator won't go to chain.
- carol.RPC.SettleInvoice(preimage[:])
-
- // We'll now mine enough blocks so Carol decides that she needs to go
- // on-chain to claim the HTLC as Bob has been inactive.
- numBlocks := padCLTV(uint32(
- invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta,
- ))
- ht.MineEmptyBlocks(int(numBlocks) - blocksMined)
-
- // Carol's commitment transaction should now be in the mempool.
- ht.AssertNumTxsInMempool(1)
-
- // The closing transaction should be spending from the funding
- // transaction.
- closingTx := ht.AssertOutpointInMempool(
- ht.OutPointFromChannelPoint(bobChanPoint),
- )
- closingTxid := closingTx.TxHash()
-
- // Since Carol has time-sensitive HTLCs, she will use the anchor for
- // CPFP purpose. Assert she has two pending anchor sweep requests - one
- // from local commit and the other from remote commit.
- ht.AssertNumPendingSweeps(carol, 2)
-
- // Mine a block, which should contain: the commitment.
- block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
- ht.AssertTxInBlock(block, closingTxid)
-
- // After the force close transaction is mined, Carol should offer her
- // second level HTLC tx to the sweeper, along with her anchor output.
- ht.AssertNumPendingSweeps(carol, 2)
-
- // Restart bob again.
- require.NoError(ht, restartBob())
-
- // After the force close transaction is mined, we should expect Bob and
- // Carol to broadcast some transactions depending on the channel
- // commitment type.
- switch c {
- // Carol should broadcast her second level HTLC transaction and Bob
- // should broadcast a sweeping tx to sweep his commitment output and
- // anchor outputs from the two channels.
- case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT:
- ht.AssertNumPendingSweeps(bob, 3)
-
- // Carol should broadcast her second level HTLC transaction and Bob
- // should broadcast a transaction to sweep his anchor outputs. Bob
- // can't sweep his commitment output yet as he has incurred an
- // additional CLTV due to being the channel initiator of a force closed
- // script-enforced leased channel.
- case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE:
- ht.AssertNumPendingSweeps(bob, 2)
-
- default:
- ht.Fatalf("unhandled commitment type %v", c)
- }
-
- // Keep track of the second level tx maturity.
- carolSecondLevelCSV := uint32(defaultCSV)
-
- // Mine a block to trigger the sweeps, also confirms Carol's CPFP
- // anchor sweeping.
- ht.MineBlocksAndAssertNumTxes(1, 1)
- carolSecondLevelCSV--
- ht.AssertNumTxsInMempool(2)
-
- // Mine a block to confirm the expected transactions.
- ht.MineBlocksAndAssertNumTxes(1, 2)
-
- // When Bob notices Carol's second level transaction in the block, he
- // will extract the preimage and offer the HTLC to his sweeper.
- ht.AssertNumPendingSweeps(bob, 1)
-
- // NOTE: after Bob is restarted, the sweeping of the direct preimage
- // spent will happen immediately so we don't need to mine a block to
- // trigger Bob's sweeper to sweep it.
- bobHtlcSweep := ht.GetNumTxsFromMempool(1)[0]
- bobHtlcSweepTxid := bobHtlcSweep.TxHash()
-
- // It should spend from the commitment in the channel with Alice.
- ht.AssertTxSpendFrom(bobHtlcSweep, aliceForceClose)
-
- // We'll now mine a block which should confirm Bob's HTLC sweep
- // transaction.
- block = ht.MineBlocksAndAssertNumTxes(1, 1)[0]
- ht.AssertTxInBlock(block, bobHtlcSweepTxid)
- carolSecondLevelCSV--
-
- // Now that the sweeping transaction has been confirmed, Bob should now
- // recognize that all contracts for the Bob-Carol channel have been
- // fully resolved
- aliceBobPendingChansLeft := 0
- if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- aliceBobPendingChansLeft = 1
- }
- for _, node := range []*node.HarnessNode{alice, bob} {
- ht.AssertNumPendingForceClose(
- node, aliceBobPendingChansLeft,
- )
- }
-
- // If we then mine 3 additional blocks, Carol's second level tx will
- // mature, and she should pull the funds.
- ht.MineEmptyBlocks(int(carolSecondLevelCSV))
- ht.AssertNumPendingSweeps(carol, 1)
-
- // Mine a block to trigger the sweep of the second level tx.
- ht.MineEmptyBlocks(1)
- carolSweep := ht.AssertNumTxsInMempool(1)[0]
-
- // When Carol's sweep gets confirmed, she should have no more pending
- // channels.
- block = ht.MineBlocksAndAssertNumTxes(1, 1)[0]
- ht.AssertTxInBlock(block, carolSweep)
- ht.AssertNumPendingForceClose(carol, 0)
-
- // With the script-enforced lease commitment type, Alice and Bob still
- // haven't been able to sweep their respective commit outputs due to the
- // additional CLTV. We'll need to mine enough blocks for the timelock to
- // expire and prompt their sweep.
- if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- // Due to the way the test is set up, Alice and Bob share the
- // same CLTV for their commit outputs even though it's enforced
- // on different channels (Alice-Bob and Bob-Carol).
- resp := alice.RPC.PendingChannels()
- require.Len(ht, resp.PendingForceClosingChannels, 1)
- forceCloseChan := resp.PendingForceClosingChannels[0]
- require.Positive(ht, forceCloseChan.BlocksTilMaturity)
-
- // Mine enough blocks for the timelock to expire.
- numBlocks := int(forceCloseChan.BlocksTilMaturity)
- ht.MineEmptyBlocks(numBlocks)
-
- // Both Alice and Bob should offer their commit sweeps.
- ht.AssertNumPendingSweeps(alice, 2)
- ht.AssertNumPendingSweeps(bob, 1)
-
- // Mine a block to trigger the sweeps.
- ht.MineEmptyBlocks(1)
-
- // Both Alice and Bob should broadcast their commit sweeps.
- aliceCommitOutpoint := wire.OutPoint{
- Hash: aliceForceClose, Index: 3,
- }
- ht.AssertOutpointInMempool(aliceCommitOutpoint)
- bobCommitOutpoint := wire.OutPoint{Hash: closingTxid, Index: 3}
- ht.AssertOutpointInMempool(bobCommitOutpoint)
-
- // Confirm their sweeps.
- ht.MineBlocksAndAssertNumTxes(1, 2)
-
- // Alice and Bob should not show any pending channels anymore as
- // they have been fully resolved.
- for _, node := range []*node.HarnessNode{alice, bob} {
- ht.AssertNumPendingForceClose(node, 0)
- }
- }
-
- // The invoice should show as settled for Carol, indicating that it was
- // swept on-chain.
- invoice := ht.AssertInvoiceState(stream, lnrpc.Invoice_SETTLED)
- require.Equal(ht, int64(invoiceAmt), invoice.AmtPaidSat)
-
- // Finally, check that the Alice's payment is correctly marked
- // succeeded.
- ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
-}
-
-// testMultiHopHtlcAggregation tests that in a multi-hop HTLC scenario, if we
-// force close a channel with both incoming and outgoing HTLCs, we can properly
-// resolve them using the second level timeout and success transactions. In
-// case of anchor channels, the second-level spends can also be aggregated and
-// properly feebumped, so we'll check that as well.
-func testMultiHopHtlcAggregation(ht *lntest.HarnessTest) {
- runMultiHopHtlcClaimTest(ht, runMultiHopHtlcAggregation)
-}
-
-func runMultiHopHtlcAggregation(ht *lntest.HarnessTest,
- alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) {
-
- // We need one additional UTXO to create the sweeping tx for the
- // second-level success txes.
- ht.FundCoins(btcutil.SatoshiPerBitcoin, bob)
-
- // First, we'll create a three hop network: Alice -> Bob -> Carol.
- aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
- ht, alice, bob, false, c, zeroConf,
- )
-
- // If this is a taproot channel, then we'll need to make some manual
- // route hints so Alice+Carol can actually find a route.
- var (
- carolRouteHints []*lnrpc.RouteHint
- aliceRouteHints []*lnrpc.RouteHint
- )
- if c == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- carolRouteHints = makeRouteHints(bob, carol, zeroConf)
- aliceRouteHints = makeRouteHints(bob, alice, zeroConf)
- }
-
- // To ensure we have capacity in both directions of the route, we'll
- // make a fairly large payment Alice->Carol and settle it.
- const reBalanceAmt = 500_000
- invoice := &lnrpc.Invoice{
- Value: reBalanceAmt,
- RouteHints: carolRouteHints,
- }
- resp := carol.RPC.AddInvoice(invoice)
- ht.CompletePaymentRequests(alice, []string{resp.PaymentRequest})
-
- // Make sure Carol has settled the invoice.
- ht.AssertInvoiceSettled(carol, resp.PaymentAddr)
-
- // With the network active, we'll now add a new hodl invoices at both
- // Alice's and Carol's end. Make sure the cltv expiry delta is large
- // enough, otherwise Bob won't send out the outgoing htlc.
- const numInvoices = 5
- const invoiceAmt = 50_000
-
- var (
- carolInvoices []*invoicesrpc.AddHoldInvoiceResp
- aliceInvoices []*invoicesrpc.AddHoldInvoiceResp
- alicePreimages []lntypes.Preimage
- payHashes [][]byte
- invoiceStreamsCarol []rpc.SingleInvoiceClient
- invoiceStreamsAlice []rpc.SingleInvoiceClient
- )
-
- // Add Carol invoices.
- for i := 0; i < numInvoices; i++ {
- var preimage lntypes.Preimage
- copy(preimage[:], ht.Random32Bytes())
- payHash := preimage.Hash()
- invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
- Value: invoiceAmt,
- CltvExpiry: finalCltvDelta,
- Hash: payHash[:],
- RouteHints: carolRouteHints,
- }
- carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
-
- carolInvoices = append(carolInvoices, carolInvoice)
- payHashes = append(payHashes, payHash[:])
-
- // Subscribe the invoice.
- stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
- invoiceStreamsCarol = append(invoiceStreamsCarol, stream)
- }
-
- // We'll give Alice's invoices a longer CLTV expiry, to ensure the
- // channel Bob<->Carol will be closed first.
- for i := 0; i < numInvoices; i++ {
- var preimage lntypes.Preimage
- copy(preimage[:], ht.Random32Bytes())
- payHash := preimage.Hash()
- invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
- Value: invoiceAmt,
- CltvExpiry: thawHeightDelta - 4,
- Hash: payHash[:],
- RouteHints: aliceRouteHints,
- }
- aliceInvoice := alice.RPC.AddHoldInvoice(invoiceReq)
-
- aliceInvoices = append(aliceInvoices, aliceInvoice)
- alicePreimages = append(alicePreimages, preimage)
- payHashes = append(payHashes, payHash[:])
-
- // Subscribe the invoice.
- stream := alice.RPC.SubscribeSingleInvoice(payHash[:])
- invoiceStreamsAlice = append(invoiceStreamsAlice, stream)
- }
-
- // Now that we've created the invoices, we'll pay them all from
- // Alice<->Carol, going through Bob. We won't wait for the response
- // however, as neither will immediately settle the payment.
-
- // Alice will pay all of Carol's invoices.
- for _, carolInvoice := range carolInvoices {
- req := &routerrpc.SendPaymentRequest{
- PaymentRequest: carolInvoice.PaymentRequest,
- TimeoutSeconds: 60,
- FeeLimitMsat: noFeeLimitMsat,
- }
- alice.RPC.SendPayment(req)
- }
-
- // And Carol will pay Alice's.
- for _, aliceInvoice := range aliceInvoices {
- req := &routerrpc.SendPaymentRequest{
- PaymentRequest: aliceInvoice.PaymentRequest,
- TimeoutSeconds: 60,
- FeeLimitMsat: noFeeLimitMsat,
- }
- carol.RPC.SendPayment(req)
- }
-
- // At this point, all 3 nodes should now the HTLCs active on their
- // channels.
- ht.AssertActiveHtlcs(alice, payHashes...)
- ht.AssertActiveHtlcs(bob, payHashes...)
- ht.AssertActiveHtlcs(carol, payHashes...)
-
- // Wait for Alice and Carol to mark the invoices as accepted. There is
- // a small gap to bridge between adding the htlc to the channel and
- // executing the exit hop logic.
- for _, stream := range invoiceStreamsCarol {
- ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
- }
-
- for _, stream := range invoiceStreamsAlice {
- ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
- }
-
- // Increase the fee estimate so that the following force close tx will
- // be cpfp'ed.
- ht.SetFeeEstimate(30000)
-
- // We want Carol's htlcs to expire off-chain to demonstrate bob's force
- // close. However, Carol will cancel her invoices to prevent force
- // closes, so we shut her down for now.
- restartCarol := ht.SuspendNode(carol)
-
- // We'll now mine enough blocks to trigger Bob's broadcast of his
- // commitment transaction due to the fact that the Carol's HTLCs are
- // about to timeout. With the default outgoing broadcast delta of zero,
- // this will be the same height as the htlc expiry height.
- numBlocks := padCLTV(
- uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
- )
- ht.MineEmptyBlocks(int(numBlocks))
-
- // Bob's force close transaction should now be found in the mempool. If
- // there are anchors, we expect it to be offered to Bob's sweeper.
- ht.AssertNumTxsInMempool(1)
-
- // Bob has two anchor sweep requests, one for remote (invalid) and the
- // other for local.
- ht.AssertNumPendingSweeps(bob, 2)
-
- closeTx := ht.AssertOutpointInMempool(
- ht.OutPointFromChannelPoint(bobChanPoint),
- )
- closeTxid := closeTx.TxHash()
-
- // Go through the closing transaction outputs, and make an index for
- // the HTLC outputs.
- successOuts := make(map[wire.OutPoint]struct{})
- timeoutOuts := make(map[wire.OutPoint]struct{})
- for i, txOut := range closeTx.TxOut {
- op := wire.OutPoint{
- Hash: closeTxid,
- Index: uint32(i),
- }
-
- switch txOut.Value {
- // If this HTLC goes towards Carol, Bob will claim it with a
- // timeout Tx. In this case the value will be the invoice
- // amount.
- case invoiceAmt:
- timeoutOuts[op] = struct{}{}
-
- // If the HTLC has direction towards Alice, Bob will claim it
- // with the success TX when he learns the preimage. In this
- // case one extra sat will be on the output, because of the
- // routing fee.
- case invoiceAmt + 1:
- successOuts[op] = struct{}{}
- }
- }
-
- // Once bob has force closed, we can restart carol.
- require.NoError(ht, restartCarol())
-
- // Mine a block to confirm the closing transaction.
- ht.MineBlocksAndAssertNumTxes(1, 1)
-
- // The above mined block will trigger Bob to sweep his anchor output.
- ht.AssertNumTxsInMempool(1)
-
- // Let Alice settle her invoices. When Bob now gets the preimages, he
- // has no other option than to broadcast his second-level transactions
- // to claim the money.
- for _, preimage := range alicePreimages {
- alice.RPC.SettleInvoice(preimage[:])
- }
-
- expectedTxes := 0
- switch c {
- // In case of anchors, all success transactions will be aggregated into
- // one, the same is the case for the timeout transactions. In this case
- // Carol will also sweep her commitment and anchor output in a single
- // tx.
- case lnrpc.CommitmentType_ANCHORS,
- lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE,
- lnrpc.CommitmentType_SIMPLE_TAPROOT:
-
- // Bob should have `numInvoices` for both HTLC success and
- // timeout txns, plus one anchor sweep.
- ht.AssertNumPendingSweeps(bob, numInvoices*2+1)
-
- // Carol should have commit and anchor outputs.
- ht.AssertNumPendingSweeps(carol, 2)
-
- // We expect to see three sweeping txns:
- // 1. Bob's sweeping tx for all timeout HTLCs.
- // 2. Bob's sweeping tx for all success HTLCs.
- // 3. Carol's sweeping tx for her commit and anchor outputs.
- expectedTxes = 3
-
- default:
- ht.Fatalf("unhandled commitment type %v", c)
- }
-
- // Mine a block to confirm Bob's anchor sweeping, which will also
- // trigger his sweeper to sweep HTLCs.
- ht.MineBlocksAndAssertNumTxes(1, 1)
-
- // Assert the sweeping txns are found in the mempool.
- txes := ht.GetNumTxsFromMempool(expectedTxes)
-
- // Since Bob can aggregate the transactions, we expect a single
- // transaction, that have multiple spends from the commitment.
- var (
- timeoutTxs []*chainhash.Hash
- successTxs []*chainhash.Hash
- )
- for _, tx := range txes {
- txid := tx.TxHash()
-
- for i := range tx.TxIn {
- prevOp := tx.TxIn[i].PreviousOutPoint
- if _, ok := successOuts[prevOp]; ok {
- successTxs = append(successTxs, &txid)
-
- break
- }
-
- if _, ok := timeoutOuts[prevOp]; ok {
- timeoutTxs = append(timeoutTxs, &txid)
-
- break
- }
- }
- }
-
- // In case of anchor we expect all the timeout and success second
- // levels to be aggregated into one tx. For earlier channel types, they
- // will be separate transactions.
- if lntest.CommitTypeHasAnchors(c) {
- require.Len(ht, timeoutTxs, 1)
- require.Len(ht, successTxs, 1)
- } else {
- require.Len(ht, timeoutTxs, numInvoices)
- require.Len(ht, successTxs, numInvoices)
- }
-
- // All mempool transactions should be spending from the commitment
- // transaction.
- ht.AssertAllTxesSpendFrom(txes, closeTxid)
-
- // Mine a block to confirm the all the transactions, including Carol's
- // commitment tx, anchor tx(optional), and Bob's second-level timeout
- // and success txes.
- ht.MineBlocksAndAssertNumTxes(1, expectedTxes)
-
- // At this point, Bob should have broadcast his second layer success
- // transaction, and should have sent it to the nursery for incubation,
- // or to the sweeper for sweeping.
- forceCloseChan := ht.AssertNumPendingForceClose(bob, 1)[0]
- ht.Logf("Bob's timelock on commit=%v, timelock on htlc=%v",
- forceCloseChan.BlocksTilMaturity,
- forceCloseChan.PendingHtlcs[0].BlocksTilMaturity)
-
- // For this channel, we also check the number of HTLCs and the stage
- // are correct.
- ht.AssertNumHTLCsAndStage(bob, bobChanPoint, numInvoices*2, 2)
-
- if c != lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- // If we then mine additional blocks, Bob can sweep his
- // commitment output.
- ht.MineEmptyBlocks(1)
-
- // Assert the tx has been offered to the sweeper.
- ht.AssertNumPendingSweeps(bob, 1)
-
- // Mine one block to trigger the sweep.
- ht.MineEmptyBlocks(1)
-
- // Find the commitment sweep.
- bobCommitSweep := ht.GetNumTxsFromMempool(1)[0]
- ht.AssertTxSpendFrom(bobCommitSweep, closeTxid)
-
- // Also ensure it is not spending from any of the HTLC output.
- for _, txin := range bobCommitSweep.TxIn {
- for _, timeoutTx := range timeoutTxs {
- require.NotEqual(ht, *timeoutTx,
- txin.PreviousOutPoint.Hash,
- "found unexpected spend of timeout tx")
- }
-
- for _, successTx := range successTxs {
- require.NotEqual(ht, *successTx,
- txin.PreviousOutPoint.Hash,
- "found unexpected spend of success tx")
- }
- }
- }
-
- switch c {
- // Mining one additional block, Bob's second level tx is mature, and he
- // can sweep the output. Before the blocks are mined, we should expect
- // to see Bob's commit sweep in the mempool.
- case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT:
- ht.MineBlocksAndAssertNumTxes(1, 1)
-
- // Since Bob is the initiator of the Bob-Carol script-enforced leased
- // channel, he incurs an additional CLTV when sweeping outputs back to
- // his wallet. We'll need to mine enough blocks for the timelock to
- // expire to prompt his broadcast.
- case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE:
- resp := bob.RPC.PendingChannels()
- require.Len(ht, resp.PendingForceClosingChannels, 1)
- forceCloseChan := resp.PendingForceClosingChannels[0]
- require.Positive(ht, forceCloseChan.BlocksTilMaturity)
- numBlocks := uint32(forceCloseChan.BlocksTilMaturity)
-
- // Add debug log.
- height := ht.CurrentHeight()
- bob.AddToLogf("itest: now mine %d blocks at height %d",
- numBlocks, height)
- ht.MineEmptyBlocks(int(numBlocks) - 1)
-
- default:
- ht.Fatalf("unhandled commitment type %v", c)
- }
-
- // Make sure Bob's sweeper has received all the sweeping requests.
- ht.AssertNumPendingSweeps(bob, numInvoices*2)
-
- // Mine one block to trigger the sweeps.
- ht.MineEmptyBlocks(1)
-
- // For leased channels, Bob's commit output will mature after the above
- // block.
- if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- ht.AssertNumPendingSweeps(bob, numInvoices*2+1)
- }
-
- // We now wait for 30 seconds to overcome the flake - there's a block
- // race between contractcourt and sweeper, causing the sweep to be
- // broadcast earlier.
- //
- // TODO(yy): remove this once `blockbeat` is in place.
- numExpected := 1
- err := wait.NoError(func() error {
- mem := ht.GetRawMempool()
- if len(mem) == numExpected {
- return nil
- }
-
- if len(mem) > 0 {
- numExpected = len(mem)
- }
-
- return fmt.Errorf("want %d, got %v in mempool: %v", numExpected,
- len(mem), mem)
- }, wait.DefaultTimeout)
- ht.Logf("Checking mempool got: %v", err)
-
- // Make sure it spends from the second level tx.
- secondLevelSweep := ht.GetNumTxsFromMempool(numExpected)[0]
- bobSweep := secondLevelSweep.TxHash()
-
- // It should be sweeping all the second-level outputs.
- var secondLvlSpends int
- for _, txin := range secondLevelSweep.TxIn {
- for _, timeoutTx := range timeoutTxs {
- if *timeoutTx == txin.PreviousOutPoint.Hash {
- secondLvlSpends++
- }
- }
-
- for _, successTx := range successTxs {
- if *successTx == txin.PreviousOutPoint.Hash {
- secondLvlSpends++
- }
- }
- }
-
- // TODO(yy): bring the following check back when `blockbeat` is in
- // place - atm we may have two sweeping transactions in the mempool.
- // require.Equal(ht, 2*numInvoices, secondLvlSpends)
-
- // When we mine one additional block, that will confirm Bob's second
- // level sweep. Now Bob should have no pending channels anymore, as
- // this just resolved it by the confirmation of the sweep transaction.
- block := ht.MineBlocksAndAssertNumTxes(1, numExpected)[0]
- ht.AssertTxInBlock(block, bobSweep)
-
- // For leased channels, we need to mine one more block to confirm Bob's
- // commit output sweep.
- //
- // NOTE: we mine this block conditionally, as the commit output may
- // have already been swept one block earlier due to the race in block
- // consumption among subsystems.
- pendingChanResp := bob.RPC.PendingChannels()
- if len(pendingChanResp.PendingForceClosingChannels) != 0 {
- ht.MineBlocksAndAssertNumTxes(1, 1)
- }
- ht.AssertNumPendingForceClose(bob, 0)
-
- // THe channel with Alice is still open.
- ht.AssertNodeNumChannels(bob, 1)
-
- // Carol should have no channels left (open nor pending).
- ht.AssertNumPendingForceClose(carol, 0)
- ht.AssertNodeNumChannels(carol, 0)
-
- // Coop close, no anchors.
- ht.CloseChannel(alice, aliceChanPoint)
-}
-
-// createThreeHopNetwork creates a topology of `Alice -> Bob -> Carol`.
-func createThreeHopNetwork(ht *lntest.HarnessTest,
- alice, bob *node.HarnessNode, carolHodl bool, c lnrpc.CommitmentType,
- zeroConf bool) (*lnrpc.ChannelPoint,
- *lnrpc.ChannelPoint, *node.HarnessNode) {
-
- ht.EnsureConnected(alice, bob)
-
- // We'll create a new node "carol" and have Bob connect to her.
- // If the carolHodl flag is set, we'll make carol always hold onto the
- // HTLC, this way it'll force Bob to go to chain to resolve the HTLC.
- carolFlags := lntest.NodeArgsForCommitType(c)
- if carolHodl {
- carolFlags = append(carolFlags, "--hodl.exit-settle")
- }
-
- if zeroConf {
- carolFlags = append(
- carolFlags, "--protocol.option-scid-alias",
- "--protocol.zero-conf",
- )
- }
- carol := ht.NewNode("Carol", carolFlags)
-
- ht.ConnectNodes(bob, carol)
-
- // Make sure there are enough utxos for anchoring. Because the anchor
- // by itself often doesn't meet the dust limit, a utxo from the wallet
- // needs to be attached as an additional input. This can still lead to
- // a positively-yielding transaction.
- for i := 0; i < 2; i++ {
- ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, alice)
- ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, bob)
- ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, carol)
-
- // Mine 1 block to get the above coins confirmed.
- ht.MineBlocksAndAssertNumTxes(1, 3)
- }
-
- // We'll start the test by creating a channel between Alice and Bob,
- // which will act as the first leg for out multi-hop HTLC.
- const chanAmt = 1000000
- var aliceFundingShim *lnrpc.FundingShim
- var thawHeight uint32
- if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- minerHeight := ht.CurrentHeight()
- thawHeight = minerHeight + thawHeightDelta
- aliceFundingShim, _ = deriveFundingShim(
- ht, alice, bob, chanAmt, thawHeight, true, c,
- )
- }
-
- var privateChan bool
- if c == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- privateChan = true
- }
-
- aliceParams := lntest.OpenChannelParams{
- Private: privateChan,
- Amt: chanAmt,
- CommitmentType: c,
- FundingShim: aliceFundingShim,
- ZeroConf: zeroConf,
- }
-
- // If the channel type is taproot, then use an explicit channel type to
- // open it.
- if c == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- aliceParams.CommitmentType = lnrpc.CommitmentType_SIMPLE_TAPROOT
- }
-
- // We'll create a channel from Bob to Carol. After this channel is
- // open, our topology looks like: A -> B -> C.
- var bobFundingShim *lnrpc.FundingShim
- if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- bobFundingShim, _ = deriveFundingShim(
- ht, bob, carol, chanAmt, thawHeight, true, c,
- )
- }
-
- // Prepare params for Bob.
- bobParams := lntest.OpenChannelParams{
- Amt: chanAmt,
- Private: privateChan,
- CommitmentType: c,
- FundingShim: bobFundingShim,
- ZeroConf: zeroConf,
- }
-
- // If the channel type is taproot, then use an explicit channel type to
- // open it.
- if c == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- bobParams.CommitmentType = lnrpc.CommitmentType_SIMPLE_TAPROOT
- }
-
- var (
- acceptStreamBob rpc.AcceptorClient
- acceptStreamCarol rpc.AcceptorClient
- cancelBob context.CancelFunc
- cancelCarol context.CancelFunc
- )
-
- // If a zero-conf channel is being opened, the nodes are signalling the
- // zero-conf feature bit. Setup a ChannelAcceptor for the fundee.
- if zeroConf {
- acceptStreamBob, cancelBob = bob.RPC.ChannelAcceptor()
- go acceptChannel(ht.T, true, acceptStreamBob)
-
- acceptStreamCarol, cancelCarol = carol.RPC.ChannelAcceptor()
- go acceptChannel(ht.T, true, acceptStreamCarol)
- }
-
- // Open channels in batch to save blocks mined.
- reqs := []*lntest.OpenChannelRequest{
- {Local: alice, Remote: bob, Param: aliceParams},
- {Local: bob, Remote: carol, Param: bobParams},
- }
- resp := ht.OpenMultiChannelsAsync(reqs)
- aliceChanPoint := resp[0]
- bobChanPoint := resp[1]
-
- // Make sure alice and carol know each other's channels.
- //
- // We'll only do this though if it wasn't a private channel we opened
- // earlier.
- if !privateChan {
- ht.AssertChannelInGraph(alice, bobChanPoint)
- ht.AssertChannelInGraph(carol, aliceChanPoint)
- } else {
- // Otherwise, we want to wait for all the channels to be shown
- // as active before we proceed.
- ht.AssertChannelExists(alice, aliceChanPoint)
- ht.AssertChannelExists(carol, bobChanPoint)
- }
-
- // Remove the ChannelAcceptor for Bob and Carol.
- if zeroConf {
- cancelBob()
- cancelCarol()
- }
-
- return aliceChanPoint, bobChanPoint, carol
-}
-
-// testHtlcTimeoutResolverExtractPreimageRemote tests that in the multi-hop
-// setting, Alice->Bob->Carol, when Bob's outgoing HTLC is swept by Carol using
-// the 2nd level success tx2nd level success tx, Bob's timeout resolver will
-// extract the preimage from the sweep tx found in mempool or blocks(for
-// neutrino). The 2nd level success tx is broadcast by Carol and spends the
-// outpoint on her commit tx.
-func testHtlcTimeoutResolverExtractPreimageRemote(ht *lntest.HarnessTest) {
- runMultiHopHtlcClaimTest(ht, runExtraPreimageFromRemoteCommit)
-}
-
-// runExtraPreimageFromRemoteCommit checks that Bob's htlc timeout resolver
-// will extract the preimage from the 2nd level success tx broadcast by Carol
-// which spends the htlc output on her commitment tx.
-func runExtraPreimageFromRemoteCommit(ht *lntest.HarnessTest,
- alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) {
-
- // First, we'll create a three hop network: Alice -> Bob -> Carol, with
- // Carol refusing to actually settle or directly cancel any HTLC's
- // self.
- aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
- ht, alice, bob, false, c, zeroConf,
- )
-
- if ht.IsNeutrinoBackend() {
- ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
- }
-
- // If this is a taproot channel, then we'll need to make some manual
- // route hints so Alice can actually find a route.
- var routeHints []*lnrpc.RouteHint
- if c == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- routeHints = makeRouteHints(bob, carol, zeroConf)
- }
-
- // With the network active, we'll now add a new hodl invoice at Carol's
- // end. Make sure the cltv expiry delta is large enough, otherwise Bob
- // won't send out the outgoing htlc.
- preimage := ht.RandomPreimage()
- payHash := preimage.Hash()
- invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
- Value: 100_000,
- CltvExpiry: finalCltvDelta,
- Hash: payHash[:],
- RouteHints: routeHints,
- }
- eveInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
-
- // Subscribe the invoice.
- stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
-
- // Now that we've created the invoice, we'll send a single payment from
- // Alice to Carol. We won't wait for the response however, as Carol
- // will not immediately settle the payment.
- req := &routerrpc.SendPaymentRequest{
- PaymentRequest: eveInvoice.PaymentRequest,
- TimeoutSeconds: 60,
- FeeLimitMsat: noFeeLimitMsat,
- }
- alice.RPC.SendPayment(req)
-
- // Once the payment sent, Alice should have one outgoing HTLC active.
- ht.AssertOutgoingHTLCActive(alice, aliceChanPoint, payHash[:])
-
- // Bob should have two HTLCs active. One incoming HTLC from Alice, and
- // one outgoing to Carol.
- ht.AssertIncomingHTLCActive(bob, aliceChanPoint, payHash[:])
- htlc := ht.AssertOutgoingHTLCActive(bob, bobChanPoint, payHash[:])
-
- // Carol should have one incoming HTLC from Bob.
- ht.AssertIncomingHTLCActive(carol, bobChanPoint, payHash[:])
-
- // Wait for Carol to mark invoice as accepted. There is a small gap to
- // bridge between adding the htlc to the channel and executing the exit
- // hop logic.
- ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
-
- // Bob now goes offline so the link between Bob and Carol is broken.
- restartBob := ht.SuspendNode(bob)
-
- // Carol now settles the invoice, since her link with Bob is broken,
- // Bob won't know the preimage.
- carol.RPC.SettleInvoice(preimage[:])
-
- // We'll now mine enough blocks to trigger Carol's broadcast of her
- // commitment transaction due to the fact that the HTLC is about to
- // timeout. With the default incoming broadcast delta of 10, this
- // will be the htlc expiry height minus 10.
- numBlocks := padCLTV(uint32(
- invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta,
- ))
- ht.MineEmptyBlocks(int(numBlocks))
-
- // Carol's force close transaction should now be found in the mempool.
- // If there are anchors, we also expect Carol's contractcourt to offer
- // the anchors to her sweeper - one from the local commitment and the
- // other from the remote.
- ht.AssertNumPendingSweeps(carol, 2)
-
- // We now mine a block to confirm Carol's closing transaction, which
- // will trigger her sweeper to sweep her CPFP anchor sweeping.
- ht.MineClosingTx(bobChanPoint)
-
- // With the closing transaction confirmed, we should expect Carol's
- // HTLC success transaction to be offered to the sweeper along with her
- // anchor output.
- ht.AssertNumPendingSweeps(carol, 2)
-
- // Mine a block to trigger the sweep, and clean up the anchor sweeping
- // tx.
- ht.MineBlocksAndAssertNumTxes(1, 1)
- ht.AssertNumTxsInMempool(1)
-
- // Restart Bob. Once he finishes syncing the channel state, he should
- // notice the force close from Carol.
- require.NoError(ht, restartBob())
-
- // Get the current height to compute number of blocks to mine to
- // trigger the htlc timeout resolver from Bob.
- height := ht.CurrentHeight()
-
- // We'll now mine enough blocks to trigger Bob's timeout resolver.
- numBlocks = htlc.ExpirationHeight - height -
- lncfg.DefaultOutgoingBroadcastDelta
-
- // We should now have Carol's htlc success tx in the mempool.
- numTxesMempool := 1
- ht.AssertNumTxsInMempool(numTxesMempool)
-
- // For neutrino backend, the timeout resolver needs to extract the
- // preimage from the blocks.
- if ht.IsNeutrinoBackend() {
- // Mine a block to confirm Carol's 2nd level success tx.
- ht.MineBlocksAndAssertNumTxes(1, 1)
- numBlocks--
- }
-
- // Mine empty blocks so Carol's htlc success tx stays in mempool. Once
- // the height is reached, Bob's timeout resolver will resolve the htlc
- // by extracing the preimage from the mempool.
- ht.MineEmptyBlocks(int(numBlocks))
-
- // Finally, check that the Alice's payment is marked as succeeded as
- // Bob has settled the htlc using the preimage extracted from Carol's
- // 2nd level success tx.
- ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
-
- switch c {
- // For anchor channel type, we should expect to see Bob's commit output
- // and his anchor output be swept in a single tx in the mempool.
- case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT:
- numTxesMempool++
-
- // For script-enforced leased channel, Bob's anchor sweep tx won't
- // happen as it's not used for CPFP, hence no wallet utxo is used so
- // it'll be uneconomical.
- case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE:
- }
-
- // For neutrino backend, Carol's second-stage sweep should be offered
- // to her sweeper.
- if ht.IsNeutrinoBackend() {
- ht.AssertNumPendingSweeps(carol, 1)
-
- // Mine a block to trigger the sweep.
- ht.MineEmptyBlocks(1)
- }
-
- // Mine a block to clean the mempool.
- ht.MineBlocksAndAssertNumTxes(1, numTxesMempool)
-
- // NOTE: for non-standby nodes there's no need to clean up the force
- // close as long as the mempool is cleaned.
- ht.CleanShutDown()
-}
-
-// testHtlcTimeoutResolverExtractPreimage tests that in the multi-hop setting,
-// Alice->Bob->Carol, when Bob's outgoing HTLC is swept by Carol using the
-// direct preimage spend, Bob's timeout resolver will extract the preimage from
-// the sweep tx found in mempool or blocks(for neutrino). The direct spend tx
-// is broadcast by Carol and spends the outpoint on Bob's commit tx.
-func testHtlcTimeoutResolverExtractPreimageLocal(ht *lntest.HarnessTest) {
- runMultiHopHtlcClaimTest(ht, runExtraPreimageFromLocalCommit)
-}
-
-// runExtraPreimageFromLocalCommit checks that Bob's htlc timeout resolver will
-// extract the preimage from the direct spend broadcast by Carol which spends
-// the htlc output on Bob's commitment tx.
-func runExtraPreimageFromLocalCommit(ht *lntest.HarnessTest,
- alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) {
-
- // First, we'll create a three hop network: Alice -> Bob -> Carol, with
- // Carol refusing to actually settle or directly cancel any HTLC's
- // self.
- aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
- ht, alice, bob, false, c, zeroConf,
- )
-
- // If this is a taproot channel, then we'll need to make some manual
- // route hints so Alice can actually find a route.
- var routeHints []*lnrpc.RouteHint
- if c == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- routeHints = makeRouteHints(bob, carol, zeroConf)
- }
-
- // With the network active, we'll now add a new hodl invoice at Carol's
- // end. Make sure the cltv expiry delta is large enough, otherwise Bob
- // won't send out the outgoing htlc.
- preimage := ht.RandomPreimage()
- payHash := preimage.Hash()
- invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
- Value: 100_000,
- CltvExpiry: finalCltvDelta,
- Hash: payHash[:],
- RouteHints: routeHints,
- }
- carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
-
- // Subscribe the invoice.
- stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
-
- // Now that we've created the invoice, we'll send a single payment from
- // Alice to Carol. We won't wait for the response however, as Carol
- // will not immediately settle the payment.
- req := &routerrpc.SendPaymentRequest{
- PaymentRequest: carolInvoice.PaymentRequest,
- TimeoutSeconds: 60,
- FeeLimitMsat: noFeeLimitMsat,
- }
- alice.RPC.SendPayment(req)
-
- // Once the payment sent, Alice should have one outgoing HTLC active.
- ht.AssertOutgoingHTLCActive(alice, aliceChanPoint, payHash[:])
-
- // Bob should have two HTLCs active. One incoming HTLC from Alice, and
- // one outgoing to Carol.
- ht.AssertIncomingHTLCActive(bob, aliceChanPoint, payHash[:])
- htlc := ht.AssertOutgoingHTLCActive(bob, bobChanPoint, payHash[:])
-
- // Carol should have one incoming HTLC from Bob.
- ht.AssertIncomingHTLCActive(carol, bobChanPoint, payHash[:])
-
- // Wait for Carol to mark invoice as accepted. There is a small gap to
- // bridge between adding the htlc to the channel and executing the exit
- // hop logic.
- ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
-
- // Bob now goes offline so the link between Bob and Carol is broken.
- restartBob := ht.SuspendNode(bob)
-
- // Carol now settles the invoice, since her link with Bob is broken,
- // Bob won't know the preimage.
- carol.RPC.SettleInvoice(preimage[:])
-
- // Stop Carol so it's easier to check the mempool's state since she
- // will broadcast the anchor sweeping once Bob force closes.
- restartCarol := ht.SuspendNode(carol)
-
- // Restart Bob to force close the channel.
- require.NoError(ht, restartBob())
-
- // Bob force closes the channel, which gets his commitment tx into the
- // mempool.
- ht.CloseChannelAssertPending(bob, bobChanPoint, true)
-
- // Bob should now has offered his anchors to his sweeper - both local
- // and remote versions.
- ht.AssertNumPendingSweeps(bob, 2)
-
- // Mine Bob's force close tx.
- closeTx := ht.MineClosingTx(bobChanPoint)
-
- // Mine Bob's anchor sweeping tx.
- ht.MineBlocksAndAssertNumTxes(1, 1)
- blocksMined := 1
-
- // We'll now mine enough blocks to trigger Carol's sweeping of the htlc
- // via the direct spend. With the default incoming broadcast delta of
- // 10, this will be the htlc expiry height minus 10.
- //
- // NOTE: we need to mine 1 fewer block as we've already mined one to
- // confirm Bob's force close tx.
- numBlocks := padCLTV(uint32(
- invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta - 1,
- ))
-
- // If this is a nont script-enforced channel, Bob will be able to sweep
- // his commit output after 4 blocks.
- if c != lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- // Mine 3 blocks so the output will be offered to the sweeper.
- ht.MineEmptyBlocks(defaultCSV - blocksMined - 1)
-
- // Assert the commit output has been offered to the sweeper.
- ht.AssertNumPendingSweeps(bob, 1)
-
- // Mine a block to trigger the sweep.
- ht.MineEmptyBlocks(1)
- blocksMined = defaultCSV
- }
-
- // Mine empty blocks so it's easier to check Bob's sweeping txes below.
- ht.MineEmptyBlocks(int(numBlocks) - blocksMined)
-
- // With the above blocks mined, we should expect Carol's to offer the
- // htlc output on Bob's commitment to the sweeper.
- //
- // TODO(yy): it's not offered to the sweeper yet, instead, the utxo
- // nursery is creating and broadcasting the sweep tx - we should unify
- // this behavior and offer it to the sweeper.
- // ht.AssertNumPendingSweeps(carol, 1)
-
- // Increase the fee rate used by the sweeper so Carol's direct spend tx
- // won't be replaced by Bob's timeout tx.
- ht.SetFeeEstimate(30000)
-
- // Restart Carol to sweep the htlc output.
- require.NoError(ht, restartCarol())
-
- ht.AssertNumPendingSweeps(carol, 2)
- ht.MineEmptyBlocks(1)
-
- // Construct the htlc output on Bob's commitment tx, and decide its
- // index based on the commit type below.
- htlcOutpoint := wire.OutPoint{Hash: closeTx.TxHash()}
-
- // Check the current mempool state and we should see,
- // - Carol's direct spend tx.
- // - Bob's local output sweep tx, if this is NOT script enforced lease.
- // - Carol's anchor sweep tx cannot be broadcast as it's uneconomical.
- switch c {
- case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT:
- htlcOutpoint.Index = 2
- ht.AssertNumTxsInMempool(2)
-
- case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE:
- htlcOutpoint.Index = 2
- ht.AssertNumTxsInMempool(1)
- }
-
- // Get the current height to compute number of blocks to mine to
- // trigger the timeout resolver from Bob.
- height := ht.CurrentHeight()
-
- // We'll now mine enough blocks to trigger Bob's htlc timeout resolver
- // to act. Once his timeout resolver starts, it will extract the
- // preimage from Carol's direct spend tx found in the mempool.
- numBlocks = htlc.ExpirationHeight - height -
- lncfg.DefaultOutgoingBroadcastDelta
-
- // Decrease the fee rate used by the sweeper so Bob's timeout tx will
- // not replace Carol's direct spend tx.
- ht.SetFeeEstimate(1000)
-
- // Mine empty blocks so Carol's direct spend tx stays in mempool. Once
- // the height is reached, Bob's timeout resolver will resolve the htlc
- // by extracing the preimage from the mempool.
- ht.MineEmptyBlocks(int(numBlocks))
-
- // For neutrino backend, the timeout resolver needs to extract the
- // preimage from the blocks.
- if ht.IsNeutrinoBackend() {
- // Make sure the direct spend tx is still in the mempool.
- ht.AssertOutpointInMempool(htlcOutpoint)
-
- // Mine a block to confirm two txns,
- // - Carol's direct spend tx.
- // - Bob's to_local output sweep tx.
- if c != lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
- ht.MineBlocksAndAssertNumTxes(1, 2)
- } else {
- ht.MineBlocksAndAssertNumTxes(1, 1)
- }
- }
-
- // Finally, check that the Alice's payment is marked as succeeded as
- // Bob has settled the htlc using the preimage extracted from Carol's
- // direct spend tx.
- ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
-
- // NOTE: for non-standby nodes there's no need to clean up the force
- // close as long as the mempool is cleaned.
- ht.CleanShutDown()
-}
diff --git a/itest/lnd_network_test.go b/itest/lnd_network_test.go
index fd17d04657..44a0dcffa5 100644
--- a/itest/lnd_network_test.go
+++ b/itest/lnd_network_test.go
@@ -126,16 +126,14 @@ func testReconnectAfterIPChange(ht *lntest.HarnessTest) {
}
// Connect Alice to Dave and Charlie.
- alice := ht.Alice
+ alice := ht.NewNodeWithCoins("Alice", nil)
ht.ConnectNodes(alice, dave)
ht.ConnectNodes(alice, charlie)
// We'll then go ahead and open a channel between Alice and Dave. This
// ensures that Charlie receives the node announcement from Alice as
// part of the announcement broadcast.
- chanPoint := ht.OpenChannel(
- alice, dave, lntest.OpenChannelParams{Amt: 1000000},
- )
+ ht.OpenChannel(alice, dave, lntest.OpenChannelParams{Amt: 1000000})
// waitForNodeAnnouncement is a closure used to wait on the given graph
// subscription for a node announcement from a node with the given
@@ -210,15 +208,12 @@ func testReconnectAfterIPChange(ht *lntest.HarnessTest) {
// address to one not listed in Dave's original advertised list of
// addresses.
ht.AssertConnected(dave, charlie)
-
- // Finally, close the channel.
- ht.CloseChannel(alice, chanPoint)
}
// testAddPeerConfig tests that the "--addpeer" config flag successfully adds
// a new peer.
func testAddPeerConfig(ht *lntest.HarnessTest) {
- alice := ht.Alice
+ alice := ht.NewNode("Alice", nil)
info := alice.RPC.GetInfo()
alicePeerAddress := info.Uris[0]
diff --git a/itest/lnd_neutrino_test.go b/itest/lnd_neutrino_test.go
index 2c551362a2..22f19d848e 100644
--- a/itest/lnd_neutrino_test.go
+++ b/itest/lnd_neutrino_test.go
@@ -13,8 +13,10 @@ func testNeutrino(ht *lntest.HarnessTest) {
ht.Skipf("skipping test for non neutrino backends")
}
+ alice := ht.NewNode("Alice", nil)
+
// Check if the neutrino sub server is running.
- statusRes := ht.Alice.RPC.Status(nil)
+ statusRes := alice.RPC.Status(nil)
require.True(ht, statusRes.Active)
require.Len(ht, statusRes.Peers, 1, "unable to find a peer")
@@ -22,11 +24,11 @@ func testNeutrino(ht *lntest.HarnessTest) {
cFilterReq := &neutrinorpc.GetCFilterRequest{
Hash: statusRes.GetBlockHash(),
}
- ht.Alice.RPC.GetCFilter(cFilterReq)
+ alice.RPC.GetCFilter(cFilterReq)
// Try to reconnect to a connected peer.
addPeerReq := &neutrinorpc.AddPeerRequest{
PeerAddrs: statusRes.Peers[0],
}
- ht.Alice.RPC.AddPeer(addPeerReq)
+ alice.RPC.AddPeer(addPeerReq)
}
diff --git a/itest/lnd_onchain_test.go b/itest/lnd_onchain_test.go
index a34b9a2e47..3a32a8c6a5 100644
--- a/itest/lnd_onchain_test.go
+++ b/itest/lnd_onchain_test.go
@@ -33,8 +33,10 @@ func testChainKit(ht *lntest.HarnessTest) {
// testChainKitGetBlock ensures that given a block hash, the RPC endpoint
// returns the correct target block.
func testChainKitGetBlock(ht *lntest.HarnessTest) {
+ alice := ht.NewNode("Alice", nil)
+
// Get best block hash.
- bestBlockRes := ht.Alice.RPC.GetBestBlock(nil)
+ bestBlockRes := alice.RPC.GetBestBlock(nil)
var bestBlockHash chainhash.Hash
err := bestBlockHash.SetBytes(bestBlockRes.BlockHash)
@@ -44,7 +46,7 @@ func testChainKitGetBlock(ht *lntest.HarnessTest) {
getBlockReq := &chainrpc.GetBlockRequest{
BlockHash: bestBlockHash[:],
}
- getBlockRes := ht.Alice.RPC.GetBlock(getBlockReq)
+ getBlockRes := alice.RPC.GetBlock(getBlockReq)
// Deserialize the block which was retrieved by hash.
msgBlock := &wire.MsgBlock{}
@@ -61,8 +63,10 @@ func testChainKitGetBlock(ht *lntest.HarnessTest) {
// testChainKitGetBlockHeader ensures that given a block hash, the RPC endpoint
// returns the correct target block header.
func testChainKitGetBlockHeader(ht *lntest.HarnessTest) {
+ alice := ht.NewNode("Alice", nil)
+
// Get best block hash.
- bestBlockRes := ht.Alice.RPC.GetBestBlock(nil)
+ bestBlockRes := alice.RPC.GetBestBlock(nil)
var (
bestBlockHash chainhash.Hash
@@ -76,7 +80,7 @@ func testChainKitGetBlockHeader(ht *lntest.HarnessTest) {
getBlockReq := &chainrpc.GetBlockRequest{
BlockHash: bestBlockHash[:],
}
- getBlockRes := ht.Alice.RPC.GetBlock(getBlockReq)
+ getBlockRes := alice.RPC.GetBlock(getBlockReq)
// Deserialize the block which was retrieved by hash.
blockReader := bytes.NewReader(getBlockRes.RawBlock)
@@ -87,7 +91,7 @@ func testChainKitGetBlockHeader(ht *lntest.HarnessTest) {
getBlockHeaderReq := &chainrpc.GetBlockHeaderRequest{
BlockHash: bestBlockHash[:],
}
- getBlockHeaderRes := ht.Alice.RPC.GetBlockHeader(getBlockHeaderReq)
+ getBlockHeaderRes := alice.RPC.GetBlockHeader(getBlockHeaderReq)
// Deserialize the block header which was retrieved by hash.
blockHeaderReader := bytes.NewReader(getBlockHeaderRes.RawBlockHeader)
@@ -104,14 +108,16 @@ func testChainKitGetBlockHeader(ht *lntest.HarnessTest) {
// testChainKitGetBlockHash ensures that given a block height, the RPC endpoint
// returns the correct target block hash.
func testChainKitGetBlockHash(ht *lntest.HarnessTest) {
+ alice := ht.NewNode("Alice", nil)
+
// Get best block hash.
- bestBlockRes := ht.Alice.RPC.GetBestBlock(nil)
+ bestBlockRes := alice.RPC.GetBestBlock(nil)
// Retrieve the block hash at best block height.
req := &chainrpc.GetBlockHashRequest{
BlockHeight: int64(bestBlockRes.BlockHeight),
}
- getBlockHashRes := ht.Alice.RPC.GetBlockHash(req)
+ getBlockHashRes := alice.RPC.GetBlockHash(req)
// Ensure best block hash is the same as retrieved block hash.
expected := bestBlockRes.BlockHash
@@ -128,8 +134,7 @@ func testChainKitSendOutputsAnchorReserve(ht *lntest.HarnessTest) {
// NOTE: we cannot reuse the standby node here as the test requires the
// node to start with no UTXOs.
charlie := ht.NewNode("Charlie", args)
- bob := ht.Bob
- ht.RestartNodeWithExtraArgs(bob, args)
+ bob := ht.NewNode("Bob", args)
// We'll start the test by sending Charlie some coins.
fundingAmount := btcutil.Amount(100_000)
@@ -148,7 +153,7 @@ func testChainKitSendOutputsAnchorReserve(ht *lntest.HarnessTest) {
// Charlie opens an anchor channel and keeps twice the amount of the
// anchor reserve in her wallet.
chanAmt := fundingAmount - 2*btcutil.Amount(reserve.RequiredReserve)
- outpoint := ht.OpenChannel(charlie, bob, lntest.OpenChannelParams{
+ ht.OpenChannel(charlie, bob, lntest.OpenChannelParams{
Amt: chanAmt,
CommitmentType: lnrpc.CommitmentType_ANCHORS,
SatPerVByte: 1,
@@ -202,11 +207,7 @@ func testChainKitSendOutputsAnchorReserve(ht *lntest.HarnessTest) {
// This second transaction should be published correctly.
charlie.RPC.SendOutputs(req)
-
ht.MineBlocksAndAssertNumTxes(1, 1)
-
- // Clean up our test setup.
- ht.CloseChannel(charlie, outpoint)
}
// testAnchorReservedValue tests that we won't allow sending transactions when
@@ -216,12 +217,8 @@ func testAnchorReservedValue(ht *lntest.HarnessTest) {
// Start two nodes supporting anchor channels.
args := lntest.NodeArgsForCommitType(lnrpc.CommitmentType_ANCHORS)
- // NOTE: we cannot reuse the standby node here as the test requires the
- // node to start with no UTXOs.
alice := ht.NewNode("Alice", args)
- bob := ht.Bob
- ht.RestartNodeWithExtraArgs(bob, args)
-
+ bob := ht.NewNode("Bob", args)
ht.ConnectNodes(alice, bob)
// Send just enough coins for Alice to open a channel without a change
diff --git a/itest/lnd_open_channel_test.go b/itest/lnd_open_channel_test.go
index a4134abcaf..9d51fc5635 100644
--- a/itest/lnd_open_channel_test.go
+++ b/itest/lnd_open_channel_test.go
@@ -3,8 +3,6 @@ package itest
import (
"fmt"
"strings"
- "testing"
- "time"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
@@ -19,6 +17,31 @@ import (
"github.com/stretchr/testify/require"
)
+// channelFeePolicyTestCases defines a set of tests to check the update channel
+// policy fee behavior.
+var channelFeePolicyTestCases = []*lntest.TestCase{
+ {
+ Name: "channel fee policy default",
+ TestFunc: testChannelFeePolicyDefault,
+ },
+ {
+ Name: "channel fee policy base fee",
+ TestFunc: testChannelFeePolicyBaseFee,
+ },
+ {
+ Name: "channel fee policy fee rate",
+ TestFunc: testChannelFeePolicyFeeRate,
+ },
+ {
+ Name: "channel fee policy base fee and fee rate",
+ TestFunc: testChannelFeePolicyBaseFeeAndFeeRate,
+ },
+ {
+ Name: "channel fee policy low base fee and fee rate",
+ TestFunc: testChannelFeePolicyLowBaseFeeAndFeeRate,
+ },
+}
+
// testOpenChannelAfterReorg tests that in the case where we have an open
// channel where the funding tx gets reorged out, the channel will no
// longer be present in the node's routing table.
@@ -30,11 +53,16 @@ func testOpenChannelAfterReorg(ht *lntest.HarnessTest) {
ht.Skipf("skipping reorg test for neutrino backend")
}
- // Create a temp miner.
- tempMiner := ht.SpawnTempMiner()
-
miner := ht.Miner()
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
+ ht.EnsureConnected(alice, bob)
+
+ // Create a temp miner after the creation of Alice.
+ //
+ // NOTE: this is needed since NewNodeWithCoins will mine a block and
+ // the temp miner needs to sync up.
+ tempMiner := ht.SpawnTempMiner()
// Create a new channel that requires 1 confs before it's considered
// open, then broadcast the funding transaction
@@ -84,7 +112,7 @@ func testOpenChannelAfterReorg(ht *lntest.HarnessTest) {
ht.AssertChannelInGraph(bob, chanPoint)
// Alice should now have 1 edge in her graph.
- ht.AssertNumActiveEdges(alice, 1, true)
+ ht.AssertNumEdges(alice, 1, true)
// Now we disconnect Alice's chain backend from the original miner, and
// connect the two miners together. Since the temporary miner knows
@@ -112,41 +140,110 @@ func testOpenChannelAfterReorg(ht *lntest.HarnessTest) {
// Since the fundingtx was reorged out, Alice should now have no edges
// in her graph.
- ht.AssertNumActiveEdges(alice, 0, true)
+ ht.AssertNumEdges(alice, 0, true)
// Cleanup by mining the funding tx again, then closing the channel.
block = ht.MineBlocksAndAssertNumTxes(1, 1)[0]
ht.AssertTxInBlock(block, *fundingTxID)
+}
+
+// testChannelFeePolicyDefault check when no params provided to
+// OpenChannelRequest: ChannelUpdate --> defaultBaseFee, defaultFeeRate.
+func testChannelFeePolicyDefault(ht *lntest.HarnessTest) {
+ const (
+ defaultBaseFee = 1000
+ defaultFeeRate = 1
+ defaultTimeLockDelta = chainreg.DefaultBitcoinTimeLockDelta
+ defaultMinHtlc = 1000
+ )
+
+ defaultMaxHtlc := lntest.CalculateMaxHtlc(funding.MaxBtcFundingAmount)
+
+ chanAmt := funding.MaxBtcFundingAmount
+ pushAmt := chanAmt / 2
+
+ feeScenario := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ PushAmt: pushAmt,
+ UseBaseFee: false,
+ UseFeeRate: false,
+ }
+
+ expectedPolicy := lnrpc.RoutingPolicy{
+ FeeBaseMsat: defaultBaseFee,
+ FeeRateMilliMsat: defaultFeeRate,
+ TimeLockDelta: defaultTimeLockDelta,
+ MinHtlc: defaultMinHtlc,
+ MaxHtlcMsat: defaultMaxHtlc,
+ }
+
+ bobExpectedPolicy := lnrpc.RoutingPolicy{
+ FeeBaseMsat: defaultBaseFee,
+ FeeRateMilliMsat: defaultFeeRate,
+ TimeLockDelta: defaultTimeLockDelta,
+ MinHtlc: defaultMinHtlc,
+ MaxHtlcMsat: defaultMaxHtlc,
+ }
- ht.CloseChannel(alice, chanPoint)
+ runChannelFeePolicyTest(
+ ht, feeScenario, &expectedPolicy, &bobExpectedPolicy,
+ )
}
-// testOpenChannelFeePolicy checks if different channel fee scenarios are
-// correctly handled when the optional channel fee parameters baseFee and
-// feeRate are provided. If the OpenChannelRequest is not provided with a value
-// for baseFee/feeRate the expectation is that the default baseFee/feeRate is
-// applied.
-//
-// 1. No params provided to OpenChannelRequest:
-// ChannelUpdate --> defaultBaseFee, defaultFeeRate
-// 2. Only baseFee provided to OpenChannelRequest:
-// ChannelUpdate --> provided baseFee, defaultFeeRate
-// 3. Only feeRate provided to OpenChannelRequest:
-// ChannelUpdate --> defaultBaseFee, provided FeeRate
-// 4. baseFee and feeRate provided to OpenChannelRequest:
-// ChannelUpdate --> provided baseFee, provided feeRate
-// 5. Both baseFee and feeRate are set to a value lower than the default:
-// ChannelUpdate --> provided baseFee, provided feeRate
-func testOpenChannelUpdateFeePolicy(ht *lntest.HarnessTest) {
+// testChannelFeePolicyBaseFee checks only baseFee provided to
+// OpenChannelRequest: ChannelUpdate --> provided baseFee, defaultFeeRate.
+func testChannelFeePolicyBaseFee(ht *lntest.HarnessTest) {
const (
defaultBaseFee = 1000
defaultFeeRate = 1
defaultTimeLockDelta = chainreg.DefaultBitcoinTimeLockDelta
defaultMinHtlc = 1000
optionalBaseFee = 1337
+ )
+
+ defaultMaxHtlc := lntest.CalculateMaxHtlc(funding.MaxBtcFundingAmount)
+
+ chanAmt := funding.MaxBtcFundingAmount
+ pushAmt := chanAmt / 2
+
+ feeScenario := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ PushAmt: pushAmt,
+ BaseFee: optionalBaseFee,
+ UseBaseFee: true,
+ UseFeeRate: false,
+ }
+
+ expectedPolicy := lnrpc.RoutingPolicy{
+ FeeBaseMsat: optionalBaseFee,
+ FeeRateMilliMsat: defaultFeeRate,
+ TimeLockDelta: defaultTimeLockDelta,
+ MinHtlc: defaultMinHtlc,
+ MaxHtlcMsat: defaultMaxHtlc,
+ }
+
+ bobExpectedPolicy := lnrpc.RoutingPolicy{
+ FeeBaseMsat: defaultBaseFee,
+ FeeRateMilliMsat: defaultFeeRate,
+ TimeLockDelta: defaultTimeLockDelta,
+ MinHtlc: defaultMinHtlc,
+ MaxHtlcMsat: defaultMaxHtlc,
+ }
+
+ runChannelFeePolicyTest(
+ ht, feeScenario, &expectedPolicy, &bobExpectedPolicy,
+ )
+}
+
+// testChannelFeePolicyFeeRate checks if only feeRate provided to
+// OpenChannelRequest: ChannelUpdate --> defaultBaseFee, provided FeeRate.
+func testChannelFeePolicyFeeRate(ht *lntest.HarnessTest) {
+ const (
+ defaultBaseFee = 1000
+ defaultFeeRate = 1
+ defaultTimeLockDelta = chainreg.DefaultBitcoinTimeLockDelta
+ defaultMinHtlc = 1000
optionalFeeRate = 1337
- lowBaseFee = 0
- lowFeeRate = 900
)
defaultMaxHtlc := lntest.CalculateMaxHtlc(funding.MaxBtcFundingAmount)
@@ -154,81 +251,20 @@ func testOpenChannelUpdateFeePolicy(ht *lntest.HarnessTest) {
chanAmt := funding.MaxBtcFundingAmount
pushAmt := chanAmt / 2
- feeScenarios := []lntest.OpenChannelParams{
- {
- Amt: chanAmt,
- PushAmt: pushAmt,
- UseBaseFee: false,
- UseFeeRate: false,
- },
- {
- Amt: chanAmt,
- PushAmt: pushAmt,
- BaseFee: optionalBaseFee,
- UseBaseFee: true,
- UseFeeRate: false,
- },
- {
- Amt: chanAmt,
- PushAmt: pushAmt,
- FeeRate: optionalFeeRate,
- UseBaseFee: false,
- UseFeeRate: true,
- },
- {
- Amt: chanAmt,
- PushAmt: pushAmt,
- BaseFee: optionalBaseFee,
- FeeRate: optionalFeeRate,
- UseBaseFee: true,
- UseFeeRate: true,
- },
- {
- Amt: chanAmt,
- PushAmt: pushAmt,
- BaseFee: lowBaseFee,
- FeeRate: lowFeeRate,
- UseBaseFee: true,
- UseFeeRate: true,
- },
+ feeScenario := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ PushAmt: pushAmt,
+ FeeRate: optionalFeeRate,
+ UseBaseFee: false,
+ UseFeeRate: true,
}
- expectedPolicies := []lnrpc.RoutingPolicy{
- {
- FeeBaseMsat: defaultBaseFee,
- FeeRateMilliMsat: defaultFeeRate,
- TimeLockDelta: defaultTimeLockDelta,
- MinHtlc: defaultMinHtlc,
- MaxHtlcMsat: defaultMaxHtlc,
- },
- {
- FeeBaseMsat: optionalBaseFee,
- FeeRateMilliMsat: defaultFeeRate,
- TimeLockDelta: defaultTimeLockDelta,
- MinHtlc: defaultMinHtlc,
- MaxHtlcMsat: defaultMaxHtlc,
- },
- {
- FeeBaseMsat: defaultBaseFee,
- FeeRateMilliMsat: optionalFeeRate,
- TimeLockDelta: defaultTimeLockDelta,
- MinHtlc: defaultMinHtlc,
- MaxHtlcMsat: defaultMaxHtlc,
- },
- {
- FeeBaseMsat: optionalBaseFee,
- FeeRateMilliMsat: optionalFeeRate,
- TimeLockDelta: defaultTimeLockDelta,
- MinHtlc: defaultMinHtlc,
- MaxHtlcMsat: defaultMaxHtlc,
- },
- {
- FeeBaseMsat: lowBaseFee,
- FeeRateMilliMsat: lowFeeRate,
- TimeLockDelta: defaultTimeLockDelta,
- MinHtlc: defaultMinHtlc,
- MaxHtlcMsat: defaultMaxHtlc,
- },
+ expectedPolicy := lnrpc.RoutingPolicy{
+ FeeBaseMsat: defaultBaseFee,
+ FeeRateMilliMsat: optionalFeeRate,
+ TimeLockDelta: defaultTimeLockDelta,
+ MinHtlc: defaultMinHtlc,
+ MaxHtlcMsat: defaultMaxHtlc,
}
bobExpectedPolicy := lnrpc.RoutingPolicy{
@@ -239,83 +275,169 @@ func testOpenChannelUpdateFeePolicy(ht *lntest.HarnessTest) {
MaxHtlcMsat: defaultMaxHtlc,
}
- // In this basic test, we'll need a third node, Carol, so we can forward
- // a payment through the channel we'll open with the different fee
- // policies.
- carol := ht.NewNode("Carol", nil)
+ runChannelFeePolicyTest(
+ ht, feeScenario, &expectedPolicy, &bobExpectedPolicy,
+ )
+}
- alice, bob := ht.Alice, ht.Bob
- nodes := []*node.HarnessNode{alice, bob, carol}
+// testChannelFeePolicyBaseFeeAndFeeRate checks if baseFee and feeRate provided
+// to OpenChannelRequest: ChannelUpdate --> provided baseFee, provided feeRate.
+func testChannelFeePolicyBaseFeeAndFeeRate(ht *lntest.HarnessTest) {
+ const (
+ defaultBaseFee = 1000
+ defaultFeeRate = 1
+ defaultTimeLockDelta = chainreg.DefaultBitcoinTimeLockDelta
+ defaultMinHtlc = 1000
+ optionalBaseFee = 1337
+ optionalFeeRate = 1337
+ )
- runTestCase := func(ht *lntest.HarnessTest,
- chanParams lntest.OpenChannelParams,
- alicePolicy, bobPolicy *lnrpc.RoutingPolicy) {
+ defaultMaxHtlc := lntest.CalculateMaxHtlc(funding.MaxBtcFundingAmount)
- // Create a channel Alice->Bob.
- chanPoint := ht.OpenChannel(alice, bob, chanParams)
- defer ht.CloseChannel(alice, chanPoint)
+ chanAmt := funding.MaxBtcFundingAmount
+ pushAmt := chanAmt / 2
- // Create a channel Carol->Alice.
- chanPoint2 := ht.OpenChannel(
- carol, alice, lntest.OpenChannelParams{
- Amt: 500000,
- },
- )
- defer ht.CloseChannel(carol, chanPoint2)
+ feeScenario := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ PushAmt: pushAmt,
+ BaseFee: optionalBaseFee,
+ FeeRate: optionalFeeRate,
+ UseBaseFee: true,
+ UseFeeRate: true,
+ }
- // Alice and Bob should see each other's ChannelUpdates,
- // advertising the preferred routing policies.
- assertNodesPolicyUpdate(
- ht, nodes, alice, alicePolicy, chanPoint,
- )
- assertNodesPolicyUpdate(ht, nodes, bob, bobPolicy, chanPoint)
+ expectedPolicy := lnrpc.RoutingPolicy{
+ FeeBaseMsat: optionalBaseFee,
+ FeeRateMilliMsat: optionalFeeRate,
+ TimeLockDelta: defaultTimeLockDelta,
+ MinHtlc: defaultMinHtlc,
+ MaxHtlcMsat: defaultMaxHtlc,
+ }
- // They should now know about the default policies.
- for _, n := range nodes {
- ht.AssertChannelPolicy(
- n, alice.PubKeyStr, alicePolicy, chanPoint,
- )
- ht.AssertChannelPolicy(
- n, bob.PubKeyStr, bobPolicy, chanPoint,
- )
- }
+ bobExpectedPolicy := lnrpc.RoutingPolicy{
+ FeeBaseMsat: defaultBaseFee,
+ FeeRateMilliMsat: defaultFeeRate,
+ TimeLockDelta: defaultTimeLockDelta,
+ MinHtlc: defaultMinHtlc,
+ MaxHtlcMsat: defaultMaxHtlc,
+ }
+
+ runChannelFeePolicyTest(
+ ht, feeScenario, &expectedPolicy, &bobExpectedPolicy,
+ )
+}
+
+// testChannelFeePolicyLowBaseFeeAndFeeRate checks if both baseFee and feeRate
+// are set to a value lower than the default: ChannelUpdate --> provided
+// baseFee, provided feeRate.
+func testChannelFeePolicyLowBaseFeeAndFeeRate(ht *lntest.HarnessTest) {
+ const (
+ defaultBaseFee = 1000
+ defaultFeeRate = 1
+ defaultTimeLockDelta = chainreg.DefaultBitcoinTimeLockDelta
+ defaultMinHtlc = 1000
+ lowBaseFee = 0
+ lowFeeRate = 900
+ )
+
+ defaultMaxHtlc := lntest.CalculateMaxHtlc(funding.MaxBtcFundingAmount)
+
+ chanAmt := funding.MaxBtcFundingAmount
+ pushAmt := chanAmt / 2
- // We should be able to forward a payment from Carol to Bob
- // through the new channel we opened.
- payReqs, _, _ := ht.CreatePayReqs(bob, paymentAmt, 1)
- ht.CompletePaymentRequests(carol, payReqs)
+ feeScenario := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ PushAmt: pushAmt,
+ BaseFee: lowBaseFee,
+ FeeRate: lowFeeRate,
+ UseBaseFee: true,
+ UseFeeRate: true,
+ }
+
+ expectedPolicy := lnrpc.RoutingPolicy{
+ FeeBaseMsat: lowBaseFee,
+ FeeRateMilliMsat: lowFeeRate,
+ TimeLockDelta: defaultTimeLockDelta,
+ MinHtlc: defaultMinHtlc,
+ MaxHtlcMsat: defaultMaxHtlc,
+ }
+
+ bobExpectedPolicy := lnrpc.RoutingPolicy{
+ FeeBaseMsat: defaultBaseFee,
+ FeeRateMilliMsat: defaultFeeRate,
+ TimeLockDelta: defaultTimeLockDelta,
+ MinHtlc: defaultMinHtlc,
+ MaxHtlcMsat: defaultMaxHtlc,
}
- for i, feeScenario := range feeScenarios {
- ht.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
- st := ht.Subtest(t)
- st.EnsureConnected(alice, bob)
+ runChannelFeePolicyTest(
+ ht, feeScenario, &expectedPolicy, &bobExpectedPolicy,
+ )
+}
- st.RestartNode(carol)
+// runChannelFeePolicyTest checks if different channel fee scenarios are
+// correctly handled when the optional channel fee parameters baseFee and
+// feeRate are provided. If the OpenChannelRequest is not provided with a value
+// for baseFee/feeRate the expectation is that the default baseFee/feeRate is
+// applied.
+func runChannelFeePolicyTest(ht *lntest.HarnessTest,
+ chanParams lntest.OpenChannelParams,
+ alicePolicy, bobPolicy *lnrpc.RoutingPolicy) {
- // Because we're using ht.Subtest(), we need to restart
- // any node we have to refresh its runtime context.
- // Otherwise, we'll get a "context canceled" error on
- // RPC calls.
- st.EnsureConnected(alice, carol)
+ // In this basic test, we'll need a third node, Carol, so we can
+ // forward a payment through the channel we'll open with the different
+ // fee policies.
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
+ carol := ht.NewNodeWithCoins("Carol", nil)
- // Send Carol enough coins to be able to open a channel
- // to Alice.
- ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
+ ht.EnsureConnected(alice, bob)
+ ht.EnsureConnected(alice, carol)
- runTestCase(
- st, feeScenario,
- &expectedPolicies[i], &bobExpectedPolicy,
- )
- })
+ nodes := []*node.HarnessNode{alice, bob, carol}
+
+ // Create a channel Alice->Bob.
+ chanPoint := ht.OpenChannel(alice, bob, chanParams)
+
+ // Create a channel Carol->Alice.
+ ht.OpenChannel(
+ carol, alice, lntest.OpenChannelParams{
+ Amt: 500000,
+ },
+ )
+
+ // Alice and Bob should see each other's ChannelUpdates, advertising
+ // the preferred routing policies.
+ assertNodesPolicyUpdate(
+ ht, nodes, alice, alicePolicy, chanPoint,
+ )
+ assertNodesPolicyUpdate(ht, nodes, bob, bobPolicy, chanPoint)
+
+ // They should now know about the default policies.
+ for _, n := range nodes {
+ ht.AssertChannelPolicy(
+ n, alice.PubKeyStr, alicePolicy, chanPoint,
+ )
+ ht.AssertChannelPolicy(
+ n, bob.PubKeyStr, bobPolicy, chanPoint,
+ )
}
+
+ // We should be able to forward a payment from Carol to Bob
+ // through the new channel we opened.
+ payReqs, _, _ := ht.CreatePayReqs(bob, paymentAmt, 1)
+ ht.CompletePaymentRequests(carol, payReqs)
}
// testBasicChannelCreationAndUpdates tests multiple channel opening and
// closing, and ensures that if a node is subscribed to channel updates they
// will be received correctly for both cooperative and force closed channels.
func testBasicChannelCreationAndUpdates(ht *lntest.HarnessTest) {
- runBasicChannelCreationAndUpdates(ht, ht.Alice, ht.Bob)
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
+ ht.EnsureConnected(alice, bob)
+
+ runBasicChannelCreationAndUpdates(ht, alice, bob)
}
// runBasicChannelCreationAndUpdates tests multiple channel opening and closing,
@@ -491,34 +613,13 @@ func runBasicChannelCreationAndUpdates(ht *lntest.HarnessTest,
)
}
-// testUpdateOnPendingOpenChannels checks that `update_add_htlc` followed by
-// `channel_ready` is properly handled. In specific, when a node is in a state
-// that it's still processing a remote `channel_ready` message, meanwhile an
-// `update_add_htlc` is received, this HTLC message is cached and settled once
-// processing `channel_ready` is complete.
-func testUpdateOnPendingOpenChannels(ht *lntest.HarnessTest) {
- // Test funder's behavior. Funder sees the channel pending, but fundee
- // sees it active and sends an HTLC.
- ht.Run("pending on funder side", func(t *testing.T) {
- st := ht.Subtest(t)
- testUpdateOnFunderPendingOpenChannels(st)
- })
-
- // Test fundee's behavior. Fundee sees the channel pending, but funder
- // sees it active and sends an HTLC.
- ht.Run("pending on fundee side", func(t *testing.T) {
- st := ht.Subtest(t)
- testUpdateOnFundeePendingOpenChannels(st)
- })
-}
-
// testUpdateOnFunderPendingOpenChannels checks that when the fundee sends an
// `update_add_htlc` followed by `channel_ready` while the funder is still
// processing the fundee's `channel_ready`, the HTLC will be cached and
// eventually settled.
func testUpdateOnFunderPendingOpenChannels(ht *lntest.HarnessTest) {
- // Grab the channel participants.
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
// Restart Alice with the config so she won't process Bob's
// channel_ready msg immediately.
@@ -535,13 +636,8 @@ func testUpdateOnFunderPendingOpenChannels(ht *lntest.HarnessTest) {
Amt: funding.MaxBtcFundingAmount,
PushAmt: funding.MaxBtcFundingAmount / 2,
}
- pendingChan := ht.OpenChannelAssertPending(alice, bob, params)
- chanPoint := &lnrpc.ChannelPoint{
- FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
- FundingTxidBytes: pendingChan.Txid,
- },
- OutputIndex: pendingChan.OutputIndex,
- }
+ pending := ht.OpenChannelAssertPending(alice, bob, params)
+ chanPoint := lntest.ChanPointFromPendingUpdate(pending)
// Alice and Bob should both consider the channel pending open.
ht.AssertNumPendingOpenChannels(alice, 1)
@@ -559,6 +655,7 @@ func testUpdateOnFunderPendingOpenChannels(ht *lntest.HarnessTest) {
// Bob will consider the channel open as there's no wait time to send
// and receive Alice's channel_ready message.
ht.AssertNumPendingOpenChannels(bob, 0)
+ ht.AssertChannelInGraph(bob, chanPoint)
// Alice and Bob now have different view of the channel. For Bob,
// since the channel_ready messages are processed, he will have a
@@ -591,9 +688,6 @@ func testUpdateOnFunderPendingOpenChannels(ht *lntest.HarnessTest) {
// Once Alice sees the channel as active, she will process the cached
// premature `update_add_htlc` and settles the payment.
ht.AssertPaymentStatusFromStream(bobStream, lnrpc.Payment_SUCCEEDED)
-
- // Close the channel.
- ht.CloseChannel(alice, chanPoint)
}
// testUpdateOnFundeePendingOpenChannels checks that when the funder sends an
@@ -601,8 +695,8 @@ func testUpdateOnFunderPendingOpenChannels(ht *lntest.HarnessTest) {
// processing the funder's `channel_ready`, the HTLC will be cached and
// eventually settled.
func testUpdateOnFundeePendingOpenChannels(ht *lntest.HarnessTest) {
- // Grab the channel participants.
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
// Restart Bob with the config so he won't process Alice's
// channel_ready msg immediately.
@@ -618,13 +712,8 @@ func testUpdateOnFundeePendingOpenChannels(ht *lntest.HarnessTest) {
params := lntest.OpenChannelParams{
Amt: funding.MaxBtcFundingAmount,
}
- pendingChan := ht.OpenChannelAssertPending(alice, bob, params)
- chanPoint := &lnrpc.ChannelPoint{
- FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
- FundingTxidBytes: pendingChan.Txid,
- },
- OutputIndex: pendingChan.OutputIndex,
- }
+ pending := ht.OpenChannelAssertPending(alice, bob, params)
+ chanPoint := lntest.ChanPointFromPendingUpdate(pending)
// Alice and Bob should both consider the channel pending open.
ht.AssertNumPendingOpenChannels(alice, 1)
@@ -636,6 +725,7 @@ func testUpdateOnFundeePendingOpenChannels(ht *lntest.HarnessTest) {
// Alice will consider the channel open as there's no wait time to send
// and receive Bob's channel_ready message.
ht.AssertNumPendingOpenChannels(alice, 0)
+ ht.AssertChannelInGraph(alice, chanPoint)
// TODO(yy): we've prematurely marked the channel as open before
// processing channel ready messages. We need to mark it as open after
@@ -674,9 +764,6 @@ func testUpdateOnFundeePendingOpenChannels(ht *lntest.HarnessTest) {
// Once Bob sees the channel as active, he will process the cached
// premature `update_add_htlc` and settles the payment.
ht.AssertPaymentStatusFromStream(aliceStream, lnrpc.Payment_SUCCEEDED)
-
- // Close the channel.
- ht.CloseChannel(alice, chanPoint)
}
// verifyCloseUpdate is used to verify that a closed channel update is of the
@@ -744,9 +831,12 @@ func verifyCloseUpdate(chanUpdate *lnrpc.ChannelEventUpdate,
// before the funding transaction is confirmed, that the FundingExpiryBlocks
// field of a PendingChannels decreases.
func testFundingExpiryBlocksOnPending(ht *lntest.HarnessTest) {
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
+ ht.EnsureConnected(alice, bob)
+
param := lntest.OpenChannelParams{Amt: 100000}
- update := ht.OpenChannelAssertPending(alice, bob, param)
+ ht.OpenChannelAssertPending(alice, bob, param)
// At this point, the channel's funding transaction will have been
// broadcast, but not confirmed. Alice and Bob's nodes should reflect
@@ -767,20 +857,6 @@ func testFundingExpiryBlocksOnPending(ht *lntest.HarnessTest) {
// Mine 1 block to confirm the funding transaction, and then close the
// channel.
ht.MineBlocksAndAssertNumTxes(1, 1)
- chanPoint := lntest.ChanPointFromPendingUpdate(update)
-
- // TODO(yy): remove the sleep once the following bug is fixed.
- //
- // We may get the error `unable to gracefully close channel
- // while peer is offline (try force closing it instead):
- // channel link not found`. This happens because the channel
- // link hasn't been added yet but we now proceed to closing the
- // channel. We may need to revisit how the channel open event
- // is created and make sure the event is only sent after all
- // relevant states have been updated.
- time.Sleep(2 * time.Second)
-
- ht.CloseChannel(alice, chanPoint)
}
// testSimpleTaprootChannelActivation ensures that a simple taproot channel is
@@ -793,9 +869,7 @@ func testSimpleTaprootChannelActivation(ht *lntest.HarnessTest) {
// Make the new set of participants.
alice := ht.NewNode("alice", simpleTaprootChanArgs)
- defer ht.Shutdown(alice)
bob := ht.NewNode("bob", simpleTaprootChanArgs)
- defer ht.Shutdown(bob)
ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
@@ -832,9 +906,6 @@ func testSimpleTaprootChannelActivation(ht *lntest.HarnessTest) {
// Verify that Alice sees an active channel to Bob.
ht.AssertChannelActive(alice, chanPoint)
-
- // Our test is done and Alice closes her channel to Bob.
- ht.CloseChannel(alice, chanPoint)
}
// testOpenChannelLockedBalance tests that when a funding reservation is
@@ -842,7 +913,6 @@ func testSimpleTaprootChannelActivation(ht *lntest.HarnessTest) {
// up as locked balance in the WalletBalance response.
func testOpenChannelLockedBalance(ht *lntest.HarnessTest) {
var (
- bob = ht.Bob
req *lnrpc.ChannelAcceptRequest
err error
)
@@ -850,6 +920,7 @@ func testOpenChannelLockedBalance(ht *lntest.HarnessTest) {
// Create a new node so we can assert exactly how much fund has been
// locked later.
alice := ht.NewNode("alice", nil)
+ bob := ht.NewNode("bob", nil)
ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
// Connect the nodes.
diff --git a/itest/lnd_payment_test.go b/itest/lnd_payment_test.go
index 3515cbc946..65a7a6dfc1 100644
--- a/itest/lnd_payment_test.go
+++ b/itest/lnd_payment_test.go
@@ -135,22 +135,32 @@ func testPaymentSucceededHTLCRemoteSwept(ht *lntest.HarnessTest) {
// direct preimage spend.
ht.AssertNumPendingSweeps(bob, 1)
- // Mine a block to trigger the sweep.
- //
- // TODO(yy): remove it once `blockbeat` is implemented.
- ht.MineEmptyBlocks(1)
-
- // Mine Bob's sweeping tx.
- ht.MineBlocksAndAssertNumTxes(1, 1)
-
// Let Alice come back up. Since the channel is now closed, we expect
// different behaviors based on whether the HTLC is a dust.
// - For dust payment, it should be failed now as the HTLC won't go
// onchain.
// - For non-dust payment, it should be marked as succeeded since her
// outgoing htlc is swept by Bob.
+ //
+ // TODO(yy): move the restart after Bob's sweeping tx being confirmed
+ // once the blockbeat starts remembering its last processed block and
+ // can handle looking for spends in the past blocks.
require.NoError(ht, restartAlice())
+ // Alice should have a pending force close channel.
+ ht.AssertNumPendingForceClose(alice, 1)
+
+ // Mine a block to trigger the sweep. This is needed because the
+ // preimage extraction logic from the link is not managed by the
+ // blockbeat, which means the preimage may be sent to the contest
+ // resolver after it's launched.
+ //
+ // TODO(yy): Expose blockbeat to the link layer.
+ ht.MineEmptyBlocks(1)
+
+ // Mine Bob's sweeping tx.
+ ht.MineBlocksAndAssertNumTxes(1, 1)
+
// Since Alice is restarted, we need to track the payments again.
payStream := alice.RPC.TrackPaymentV2(payHash[:])
dustPayStream := alice.RPC.TrackPaymentV2(dustPayHash[:])
@@ -169,21 +179,19 @@ func testPaymentSucceededHTLCRemoteSwept(ht *lntest.HarnessTest) {
// out and claimed onchain via the timeout path, the payment will be marked as
// failed. This test creates a topology from Alice -> Bob, and let Alice send
// payments to Bob. Bob then goes offline, such that Alice's outgoing HTLC will
-// time out. Alice will also be restarted to make sure resumed payments are
-// also marked as failed.
+// time out.
func testPaymentFailedHTLCLocalSwept(ht *lntest.HarnessTest) {
- success := ht.Run("fail payment", func(t *testing.T) {
- st := ht.Subtest(t)
- runTestPaymentHTLCTimeout(st, false)
- })
- if !success {
- return
- }
+ runTestPaymentHTLCTimeout(ht, false)
+}
- ht.Run("fail resumed payment", func(t *testing.T) {
- st := ht.Subtest(t)
- runTestPaymentHTLCTimeout(st, true)
- })
+// testPaymentFailedHTLCLocalSweptResumed checks that when an outgoing HTLC is
+// timed out and claimed onchain via the timeout path, the payment will be
+// marked as failed. This test creates a topology from Alice -> Bob, and let
+// Alice send payments to Bob. Bob then goes offline, such that Alice's
+// outgoing HTLC will time out. Alice will be restarted to make sure resumed
+// payments are also marked as failed.
+func testPaymentFailedHTLCLocalSweptResumed(ht *lntest.HarnessTest) {
+ runTestPaymentHTLCTimeout(ht, true)
}
// runTestPaymentHTLCTimeout is the helper function that actually runs the
@@ -325,9 +333,6 @@ func runTestPaymentHTLCTimeout(ht *lntest.HarnessTest, restartAlice bool) {
// sweep her outgoing HTLC in next block.
ht.MineBlocksAndAssertNumTxes(1, 1)
- // Cleanup the channel.
- ht.CleanupForceClose(alice)
-
// We expect the non-dust payment to marked as failed in Alice's
// database and also from her stream.
ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_FAILED)
@@ -339,7 +344,8 @@ func runTestPaymentHTLCTimeout(ht *lntest.HarnessTest, restartAlice bool) {
// to return floor fee rate(1 sat/vb).
func testSendDirectPayment(ht *lntest.HarnessTest) {
// Grab Alice and Bob's nodes for convenience.
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
// Create a list of commitment types we want to test.
commitmentTypes := []lnrpc.CommitmentType{
@@ -441,6 +447,9 @@ func testSendDirectPayment(ht *lntest.HarnessTest) {
// Make sure they are connected.
st.EnsureConnected(alice, bob)
+ // TODO(yy): remove this line to fix the ListCoins bug.
+ st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
+
// Open a channel with 100k satoshis between Alice and
// Bob with Alice being the sole funder of the channel.
params := lntest.OpenChannelParams{
@@ -459,7 +468,9 @@ func testSendDirectPayment(ht *lntest.HarnessTest) {
}
func testListPayments(ht *lntest.HarnessTest) {
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
+ ht.EnsureConnected(alice, bob)
// Check that there are no payments before test.
ht.AssertNumPayments(alice, 0)
@@ -467,9 +478,7 @@ func testListPayments(ht *lntest.HarnessTest) {
// Open a channel with 100k satoshis between Alice and Bob with Alice
// being the sole funder of the channel.
chanAmt := btcutil.Amount(100000)
- chanPoint := ht.OpenChannel(
- alice, bob, lntest.OpenChannelParams{Amt: chanAmt},
- )
+ ht.OpenChannel(alice, bob, lntest.OpenChannelParams{Amt: chanAmt})
// Now that the channel is open, create an invoice for Bob which
// expects a payment of 1000 satoshis from Alice paid via a particular
@@ -628,17 +637,6 @@ func testListPayments(ht *lntest.HarnessTest) {
// Check that there are no payments after test.
ht.AssertNumPayments(alice, 0)
-
- // TODO(yy): remove the sleep once the following bug is fixed.
- // When the invoice is reported settled, the commitment dance is not
- // yet finished, which can cause an error when closing the channel,
- // saying there's active HTLCs. We need to investigate this issue and
- // reverse the order to, first finish the commitment dance, then report
- // the invoice as settled.
- time.Sleep(2 * time.Second)
-
- // Close the channel.
- ht.CloseChannel(alice, chanPoint)
}
// testPaymentFollowingChannelOpen tests that the channel transition from
@@ -651,7 +649,10 @@ func testPaymentFollowingChannelOpen(ht *lntest.HarnessTest) {
channelCapacity := paymentAmt * 1000
// We first establish a channel between Alice and Bob.
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
+ ht.EnsureConnected(alice, bob)
+
p := lntest.OpenChannelParams{
Amt: channelCapacity,
}
@@ -685,19 +686,6 @@ func testPaymentFollowingChannelOpen(ht *lntest.HarnessTest) {
// Send payment to Bob so that a channel update to disk will be
// executed.
ht.CompletePaymentRequests(alice, []string{bobPayReqs[0]})
-
- // TODO(yy): remove the sleep once the following bug is fixed.
- // When the invoice is reported settled, the commitment dance is not
- // yet finished, which can cause an error when closing the channel,
- // saying there's active HTLCs. We need to investigate this issue and
- // reverse the order to, first finish the commitment dance, then report
- // the invoice as settled.
- time.Sleep(2 * time.Second)
-
- // Finally, immediately close the channel. This function will also
- // block until the channel is closed and will additionally assert the
- // relevant channel closing post conditions.
- ht.CloseChannel(alice, chanPoint)
}
// testAsyncPayments tests the performance of the async payments.
@@ -805,11 +793,6 @@ func runAsyncPayments(ht *lntest.HarnessTest, alice, bob *node.HarnessNode,
ht.Log("\tBenchmark info: Elapsed time: ", timeTaken)
ht.Log("\tBenchmark info: TPS: ",
float64(numInvoices)/timeTaken.Seconds())
-
- // Finally, immediately close the channel. This function will also
- // block until the channel is closed and will additionally assert the
- // relevant channel closing post conditions.
- ht.CloseChannel(alice, chanPoint)
}
// testBidirectionalAsyncPayments tests that nodes are able to send the
@@ -917,17 +900,14 @@ func testBidirectionalAsyncPayments(ht *lntest.HarnessTest) {
// Next query for Bob's and Alice's channel states, in order to confirm
// that all payment have been successfully transmitted.
assertChannelState(ht, bob, chanPoint, bobAmt, aliceAmt)
-
- // Finally, immediately close the channel. This function will also
- // block until the channel is closed and will additionally assert the
- // relevant channel closing post conditions.
- ht.CloseChannel(alice, chanPoint)
}
func testInvoiceSubscriptions(ht *lntest.HarnessTest) {
const chanAmt = btcutil.Amount(500000)
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
+ ht.EnsureConnected(alice, bob)
// Create a new invoice subscription client for Bob, the notification
// should be dispatched shortly below.
@@ -936,9 +916,7 @@ func testInvoiceSubscriptions(ht *lntest.HarnessTest) {
// Open a channel with 500k satoshis between Alice and Bob with Alice
// being the sole funder of the channel.
- chanPoint := ht.OpenChannel(
- alice, bob, lntest.OpenChannelParams{Amt: chanAmt},
- )
+ ht.OpenChannel(alice, bob, lntest.OpenChannelParams{Amt: chanAmt})
// Next create a new invoice for Bob requesting 1k satoshis.
const paymentAmt = 1000
@@ -1040,16 +1018,6 @@ func testInvoiceSubscriptions(ht *lntest.HarnessTest) {
// At this point, all the invoices should be fully settled.
require.Empty(ht, settledInvoices, "not all invoices settled")
-
- // TODO(yy): remove the sleep once the following bug is fixed.
- // When the invoice is reported settled, the commitment dance is not
- // yet finished, which can cause an error when closing the channel,
- // saying there's active HTLCs. We need to investigate this issue and
- // reverse the order to, first finish the commitment dance, then report
- // the invoice as settled.
- time.Sleep(2 * time.Second)
-
- ht.CloseChannel(alice, chanPoint)
}
// assertChannelState asserts the channel state by checking the values in
@@ -1097,23 +1065,16 @@ func assertChannelState(ht *lntest.HarnessTest, hn *node.HarnessNode,
// 5.) Alice observes a failed OR succeeded payment with failure reason
// FAILURE_REASON_CANCELED which suppresses further payment attempts.
func testPaymentFailureReasonCanceled(ht *lntest.HarnessTest) {
- // Initialize the test context with 3 connected nodes.
- ts := newInterceptorTestScenario(ht)
-
- alice, bob, carol := ts.alice, ts.bob, ts.carol
-
- // Open and wait for channels.
const chanAmt = btcutil.Amount(300000)
p := lntest.OpenChannelParams{Amt: chanAmt}
- reqs := []*lntest.OpenChannelRequest{
- {Local: alice, Remote: bob, Param: p},
- {Local: bob, Remote: carol, Param: p},
- }
- resp := ht.OpenMultiChannelsAsync(reqs)
- cpAB, cpBC := resp[0], resp[1]
- // Make sure Alice is aware of channel Bob=>Carol.
- ht.AssertChannelInGraph(alice, cpBC)
+ // Initialize the test context with 3 connected nodes.
+ cfgs := [][]string{nil, nil, nil}
+
+ // Open and wait for channels.
+ chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, p)
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
+ cpAB := chanPoints[0]
// Connect the interceptor.
interceptor, cancelInterceptor := bob.RPC.HtlcInterceptor()
@@ -1123,7 +1084,8 @@ func testPaymentFailureReasonCanceled(ht *lntest.HarnessTest) {
// htlc even though the payment context was canceled before invoice
// settlement.
sendPaymentInterceptAndCancel(
- ht, ts, cpAB, routerrpc.ResolveHoldForwardAction_RESUME,
+ ht, alice, bob, carol, cpAB,
+ routerrpc.ResolveHoldForwardAction_RESUME,
lnrpc.Payment_SUCCEEDED, interceptor,
)
@@ -1133,24 +1095,18 @@ func testPaymentFailureReasonCanceled(ht *lntest.HarnessTest) {
// Note that we'd have to reset Alice's mission control if we tested the
// htlc fail case before the htlc resume case.
sendPaymentInterceptAndCancel(
- ht, ts, cpAB, routerrpc.ResolveHoldForwardAction_FAIL,
+ ht, alice, bob, carol, cpAB,
+ routerrpc.ResolveHoldForwardAction_FAIL,
lnrpc.Payment_FAILED, interceptor,
)
-
- // Finally, close channels.
- ht.CloseChannel(alice, cpAB)
- ht.CloseChannel(bob, cpBC)
}
func sendPaymentInterceptAndCancel(ht *lntest.HarnessTest,
- ts *interceptorTestScenario, cpAB *lnrpc.ChannelPoint,
+ alice, bob, carol *node.HarnessNode, cpAB *lnrpc.ChannelPoint,
interceptorAction routerrpc.ResolveHoldForwardAction,
expectedPaymentStatus lnrpc.Payment_PaymentStatus,
interceptor rpc.InterceptorClient) {
- // Prepare the test cases.
- alice, bob, carol := ts.alice, ts.bob, ts.carol
-
// Prepare the test cases.
addResponse := carol.RPC.AddInvoice(&lnrpc.Invoice{
ValueMsat: 1000,
@@ -1220,21 +1176,21 @@ func sendPaymentInterceptAndCancel(ht *lntest.HarnessTest,
// out and claimed onchain via the timeout path, the payment will be marked as
// failed. This test creates a topology from Alice -> Bob, and let Alice send
// payments to Bob. Bob then goes offline, such that Alice's outgoing HTLC will
-// time out. Alice will also be restarted to make sure resumed payments are
-// also marked as failed.
+// time out.
func testSendToRouteFailHTLCTimeout(ht *lntest.HarnessTest) {
- success := ht.Run("fail payment", func(t *testing.T) {
- st := ht.Subtest(t)
- runSendToRouteFailHTLCTimeout(st, false)
- })
- if !success {
- return
- }
+ runSendToRouteFailHTLCTimeout(ht, false)
+}
- ht.Run("fail resumed payment", func(t *testing.T) {
- st := ht.Subtest(t)
- runTestPaymentHTLCTimeout(st, true)
- })
+// testSendToRouteFailHTLCTimeout is similar to
+// testPaymentFailedHTLCLocalSwept. The only difference is the `SendPayment` is
+// replaced with `SendToRouteV2`. It checks that when an outgoing HTLC is timed
+// out and claimed onchain via the timeout path, the payment will be marked as
+// failed. This test creates a topology from Alice -> Bob, and let Alice send
+// payments to Bob. Bob then goes offline, such that Alice's outgoing HTLC will
+// time out. Alice will be restarted to make sure resumed payments are also
+// marked as failed.
+func testSendToRouteFailHTLCTimeoutResumed(ht *lntest.HarnessTest) {
+ runTestPaymentHTLCTimeout(ht, true)
}
// runSendToRouteFailHTLCTimeout is the helper function that actually runs the
diff --git a/itest/lnd_psbt_test.go b/itest/lnd_psbt_test.go
index 438661138b..f8ddc26839 100644
--- a/itest/lnd_psbt_test.go
+++ b/itest/lnd_psbt_test.go
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/hex"
"testing"
- "time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcec/v2/ecdsa"
@@ -27,126 +26,89 @@ import (
"github.com/stretchr/testify/require"
)
-// testPsbtChanFunding makes sure a channel can be opened between carol and dave
-// by using a Partially Signed Bitcoin Transaction that funds the channel
-// multisig funding output.
-func testPsbtChanFunding(ht *lntest.HarnessTest) {
- const (
- burnAddr = "bcrt1qxsnqpdc842lu8c0xlllgvejt6rhy49u6fmpgyz"
- )
-
- testCases := []struct {
- name string
- commitmentType lnrpc.CommitmentType
- private bool
- }{
- {
- name: "anchors",
- commitmentType: lnrpc.CommitmentType_ANCHORS,
- private: false,
+// psbtFundingTestCases contains the test cases for funding via PSBT.
+var psbtFundingTestCases = []*lntest.TestCase{
+ {
+ Name: "psbt funding anchor",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ runPsbtChanFunding(
+ ht, false, lnrpc.CommitmentType_ANCHORS,
+ )
},
- {
- name: "simple taproot",
- commitmentType: lnrpc.CommitmentType_SIMPLE_TAPROOT,
-
- // Set this to true once simple taproot channels can be
- // announced to the network.
- private: true,
+ },
+ {
+ Name: "psbt external funding anchor",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ runPsbtChanFundingExternal(
+ ht, false, lnrpc.CommitmentType_ANCHORS,
+ )
},
- }
+ },
+ {
+ Name: "psbt single step funding anchor",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ runPsbtChanFundingSingleStep(
+ ht, false, lnrpc.CommitmentType_ANCHORS,
+ )
+ },
+ },
+ {
+ Name: "psbt funding simple taproot",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ runPsbtChanFunding(
+ ht, true, lnrpc.CommitmentType_SIMPLE_TAPROOT,
+ )
+ },
+ },
+ {
+ Name: "psbt external funding simple taproot",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ runPsbtChanFundingExternal(
+ ht, true, lnrpc.CommitmentType_SIMPLE_TAPROOT,
+ )
+ },
+ },
+ {
+ Name: "psbt single step funding simple taproot",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ runPsbtChanFundingSingleStep(
+ ht, true, lnrpc.CommitmentType_SIMPLE_TAPROOT,
+ )
+ },
+ },
+}
- for _, tc := range testCases {
- tc := tc
-
- success := ht.T.Run(tc.name, func(tt *testing.T) {
- st := ht.Subtest(tt)
-
- args := lntest.NodeArgsForCommitType(tc.commitmentType)
-
- // First, we'll create two new nodes that we'll use to
- // open channels between for this test. Dave gets some
- // coins that will be used to fund the PSBT, just to
- // make sure that Carol has an empty wallet.
- carol := st.NewNode("carol", args)
- dave := st.NewNode("dave", args)
-
- // We just send enough funds to satisfy the anchor
- // channel reserve for 5 channels (50k sats).
- st.FundCoins(50_000, carol)
- st.FundCoins(50_000, dave)
-
- st.RunTestCase(&lntest.TestCase{
- Name: tc.name,
- TestFunc: func(sst *lntest.HarnessTest) {
- runPsbtChanFunding(
- sst, carol, dave, tc.private,
- tc.commitmentType,
- )
- },
- })
+// runPsbtChanFunding makes sure a channel can be opened between carol and dave
+// by using a Partially Signed Bitcoin Transaction that funds the channel
+// multisig funding output.
+func runPsbtChanFunding(ht *lntest.HarnessTest, private bool,
+ commitType lnrpc.CommitmentType) {
- // Empty out the wallets so there aren't any lingering
- // coins.
- sendAllCoinsConfirm(st, carol, burnAddr)
- sendAllCoinsConfirm(st, dave, burnAddr)
-
- // Now we test the second scenario. Again, we just send
- // enough funds to satisfy the anchor channel reserve
- // for 5 channels (50k sats).
- st.FundCoins(50_000, carol)
- st.FundCoins(50_000, dave)
-
- st.RunTestCase(&lntest.TestCase{
- Name: tc.name,
- TestFunc: func(sst *lntest.HarnessTest) {
- runPsbtChanFundingExternal(
- sst, carol, dave, tc.private,
- tc.commitmentType,
- )
- },
- })
+ args := lntest.NodeArgsForCommitType(commitType)
- // Empty out the wallets a last time, so there aren't
- // any lingering coins.
- sendAllCoinsConfirm(st, carol, burnAddr)
- sendAllCoinsConfirm(st, dave, burnAddr)
-
- // The last test case tests the anchor channel reserve
- // itself, so we need empty wallets.
- st.RunTestCase(&lntest.TestCase{
- Name: tc.name,
- TestFunc: func(sst *lntest.HarnessTest) {
- runPsbtChanFundingSingleStep(
- sst, carol, dave, tc.private,
- tc.commitmentType,
- )
- },
- })
- })
- if !success {
- // Log failure time to help relate the lnd logs to the
- // failure.
- ht.Logf("Failure time: %v", time.Now().Format(
- "2006-01-02 15:04:05.000",
- ))
+ // First, we'll create two new nodes that we'll use to open channels
+ // between for this test. Dave gets some coins that will be used to
+ // fund the PSBT, just to make sure that Carol has an empty wallet.
+ carol := ht.NewNode("carol", args)
+ dave := ht.NewNode("dave", args)
- break
- }
- }
+ // We just send enough funds to satisfy the anchor channel reserve for
+ // 5 channels (50k sats).
+ ht.FundCoins(50_000, carol)
+ ht.FundCoins(50_000, dave)
+
+ runPsbtChanFundingWithNodes(ht, carol, dave, private, commitType)
}
-// runPsbtChanFunding makes sure a channel can be opened between carol and dave
-// by using a Partially Signed Bitcoin Transaction that funds the channel
-// multisig funding output.
-func runPsbtChanFunding(ht *lntest.HarnessTest, carol, dave *node.HarnessNode,
- private bool, commitType lnrpc.CommitmentType) {
+func runPsbtChanFundingWithNodes(ht *lntest.HarnessTest, carol,
+ dave *node.HarnessNode, private bool, commitType lnrpc.CommitmentType) {
const chanSize = funding.MaxBtcFundingAmount
ht.FundCoins(btcutil.SatoshiPerBitcoin, dave)
// Before we start the test, we'll ensure both sides are connected so
// the funding flow can be properly executed.
- alice := ht.Alice
+ alice := ht.NewNodeWithCoins("Alice", nil)
ht.EnsureConnected(carol, dave)
ht.EnsureConnected(carol, alice)
@@ -307,6 +269,9 @@ func runPsbtChanFunding(ht *lntest.HarnessTest, carol, dave *node.HarnessNode,
txHash := finalTx.TxHash()
block := ht.MineBlocksAndAssertNumTxes(6, 1)[0]
ht.AssertTxInBlock(block, txHash)
+
+ ht.AssertChannelActive(carol, chanPoint)
+ ht.AssertChannelActive(carol, chanPoint2)
ht.AssertChannelInGraph(carol, chanPoint)
ht.AssertChannelInGraph(carol, chanPoint2)
@@ -324,27 +289,33 @@ func runPsbtChanFunding(ht *lntest.HarnessTest, carol, dave *node.HarnessNode,
}
resp := dave.RPC.AddInvoice(invoice)
ht.CompletePaymentRequests(carol, []string{resp.PaymentRequest})
-
- // To conclude, we'll close the newly created channel between Carol and
- // Dave. This function will also block until the channel is closed and
- // will additionally assert the relevant channel closing post
- // conditions.
- ht.CloseChannel(carol, chanPoint)
- ht.CloseChannel(carol, chanPoint2)
}
// runPsbtChanFundingExternal makes sure a channel can be opened between carol
// and dave by using a Partially Signed Bitcoin Transaction that funds the
// channel multisig funding output and is fully funded by an external third
// party.
-func runPsbtChanFundingExternal(ht *lntest.HarnessTest, carol,
- dave *node.HarnessNode, private bool, commitType lnrpc.CommitmentType) {
+func runPsbtChanFundingExternal(ht *lntest.HarnessTest, private bool,
+ commitType lnrpc.CommitmentType) {
+
+ args := lntest.NodeArgsForCommitType(commitType)
+
+ // First, we'll create two new nodes that we'll use to open channels
+ // between for this test. Dave gets some coins that will be used to
+ // fund the PSBT, just to make sure that Carol has an empty wallet.
+ carol := ht.NewNode("carol", args)
+ dave := ht.NewNode("dave", args)
+
+ // We just send enough funds to satisfy the anchor channel reserve for
+ // 5 channels (50k sats).
+ ht.FundCoins(50_000, carol)
+ ht.FundCoins(50_000, dave)
const chanSize = funding.MaxBtcFundingAmount
// Before we start the test, we'll ensure both sides are connected so
// the funding flow can be properly executed.
- alice := ht.Alice
+ alice := ht.NewNodeWithCoins("Alice", nil)
ht.EnsureConnected(carol, dave)
ht.EnsureConnected(carol, alice)
@@ -499,26 +470,25 @@ func runPsbtChanFundingExternal(ht *lntest.HarnessTest, carol,
}
resp := dave.RPC.AddInvoice(invoice)
ht.CompletePaymentRequests(carol, []string{resp.PaymentRequest})
-
- // To conclude, we'll close the newly created channel between Carol and
- // Dave. This function will also block until the channels are closed and
- // will additionally assert the relevant channel closing post
- // conditions.
- ht.CloseChannel(carol, chanPoint)
- ht.CloseChannel(carol, chanPoint2)
}
// runPsbtChanFundingSingleStep checks whether PSBT funding works also when
// the wallet of both nodes are empty and one of them uses PSBT and an external
// wallet to fund the channel while creating reserve output in the same
// transaction.
-func runPsbtChanFundingSingleStep(ht *lntest.HarnessTest, carol,
- dave *node.HarnessNode, private bool, commitType lnrpc.CommitmentType) {
+func runPsbtChanFundingSingleStep(ht *lntest.HarnessTest, private bool,
+ commitType lnrpc.CommitmentType) {
+
+ args := lntest.NodeArgsForCommitType(commitType)
+
+ // First, we'll create two new nodes that we'll use to open channels
+ // between for this test.
+ carol := ht.NewNode("carol", args)
+ dave := ht.NewNode("dave", args)
const chanSize = funding.MaxBtcFundingAmount
- alice := ht.Alice
- ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
+ alice := ht.NewNodeWithCoins("Alice", nil)
// Get new address for anchor reserve.
req := &lnrpc.NewAddressRequest{
@@ -650,12 +620,6 @@ func runPsbtChanFundingSingleStep(ht *lntest.HarnessTest, carol,
}
resp := dave.RPC.AddInvoice(invoice)
ht.CompletePaymentRequests(carol, []string{resp.PaymentRequest})
-
- // To conclude, we'll close the newly created channel between Carol and
- // Dave. This function will also block until the channel is closed and
- // will additionally assert the relevant channel closing post
- // conditions.
- ht.CloseChannel(carol, chanPoint)
}
// testSignPsbt tests that the SignPsbt RPC works correctly.
@@ -697,7 +661,8 @@ func testSignPsbt(ht *lntest.HarnessTest) {
for _, tc := range psbtTestRunners {
succeed := ht.Run(tc.name, func(t *testing.T) {
st := ht.Subtest(t)
- tc.runner(st, st.Alice)
+ alice := st.NewNodeWithCoins("Alice", nil)
+ tc.runner(st, alice)
})
// Abort the test if failed.
@@ -1088,6 +1053,9 @@ func runFundAndSignPsbt(ht *lntest.HarnessTest, alice *node.HarnessNode) {
// a PSBT that already specifies an input but where the user still wants the
// wallet to perform coin selection.
func testFundPsbt(ht *lntest.HarnessTest) {
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
+
// We test a pay-join between Alice and Bob. Bob wants to send Alice
// 5 million Satoshis in a non-obvious way. So Bob selects a UTXO that's
// bigger than 5 million Satoshis and expects the change minus the send
@@ -1095,20 +1063,20 @@ func testFundPsbt(ht *lntest.HarnessTest) {
// combines her change with the 5 million Satoshis from Bob. With this
// Alice ends up paying the fees for a transfer to her.
const sendAmount = 5_000_000
- aliceAddr := ht.Alice.RPC.NewAddress(&lnrpc.NewAddressRequest{
+ aliceAddr := alice.RPC.NewAddress(&lnrpc.NewAddressRequest{
Type: lnrpc.AddressType_TAPROOT_PUBKEY,
})
- bobAddr := ht.Bob.RPC.NewAddress(&lnrpc.NewAddressRequest{
+ bobAddr := bob.RPC.NewAddress(&lnrpc.NewAddressRequest{
Type: lnrpc.AddressType_TAPROOT_PUBKEY,
})
- ht.Alice.UpdateState()
- ht.Bob.UpdateState()
- aliceStartBalance := ht.Alice.State.Wallet.TotalBalance
- bobStartBalance := ht.Bob.State.Wallet.TotalBalance
+ alice.UpdateState()
+ bob.UpdateState()
+ aliceStartBalance := alice.State.Wallet.TotalBalance
+ bobStartBalance := bob.State.Wallet.TotalBalance
var bobUtxo *lnrpc.Utxo
- bobUnspent := ht.Bob.RPC.ListUnspent(&walletrpc.ListUnspentRequest{})
+ bobUnspent := bob.RPC.ListUnspent(&walletrpc.ListUnspentRequest{})
for _, utxo := range bobUnspent.Utxos {
if utxo.AmountSat > sendAmount {
bobUtxo = utxo
@@ -1145,7 +1113,7 @@ func testFundPsbt(ht *lntest.HarnessTest) {
require.NoError(ht, err)
derivation, trDerivation := getAddressBip32Derivation(
- ht, bobUtxo.Address, ht.Bob,
+ ht, bobUtxo.Address, bob,
)
bobUtxoPkScript, _ := hex.DecodeString(bobUtxo.PkScript)
@@ -1165,31 +1133,31 @@ func testFundPsbt(ht *lntest.HarnessTest) {
// We have the template now. Bob basically funds the 5 million Sats to
// send to Alice and Alice now only needs to coin select to pay for the
// fees.
- fundedPacket := fundPsbtCoinSelect(ht, ht.Alice, packet, 1)
+ fundedPacket := fundPsbtCoinSelect(ht, alice, packet, 1)
txFee, err := fundedPacket.GetTxFee()
require.NoError(ht, err)
// We now let Bob sign the transaction.
- signedPacket := signPacket(ht, ht.Bob, fundedPacket)
+ signedPacket := signPacket(ht, bob, fundedPacket)
// And then Alice, which should give us a fully signed TX.
- signedPacket = signPacket(ht, ht.Alice, signedPacket)
+ signedPacket = signPacket(ht, alice, signedPacket)
// We should be able to finalize the PSBT and extract the final TX now.
- extractPublishAndMine(ht, ht.Alice, signedPacket)
+ extractPublishAndMine(ht, alice, signedPacket)
// Make sure the new wallet balances are reflected correctly.
ht.AssertActiveNodesSynced()
- ht.Alice.UpdateState()
- ht.Bob.UpdateState()
+ alice.UpdateState()
+ bob.UpdateState()
require.Equal(
ht, aliceStartBalance+sendAmount-int64(txFee),
- ht.Alice.State.Wallet.TotalBalance,
+ alice.State.Wallet.TotalBalance,
)
require.Equal(
ht, bobStartBalance-sendAmount,
- ht.Bob.State.Wallet.TotalBalance,
+ bob.State.Wallet.TotalBalance,
)
}
@@ -1596,6 +1564,9 @@ func sendAllCoinsToAddrType(ht *lntest.HarnessTest,
// the channel opening. The psbt funding flow is used to simulate this behavior
// because we can easily let the remote peer run into the timeout.
func testPsbtChanFundingFailFlow(ht *lntest.HarnessTest) {
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
+
const chanSize = funding.MaxBtcFundingAmount
// Decrease the timeout window for the remote peer to accelerate the
@@ -1604,12 +1575,10 @@ func testPsbtChanFundingFailFlow(ht *lntest.HarnessTest) {
"--dev.reservationtimeout=1s",
"--dev.zombiesweeperinterval=1s",
}
- ht.RestartNodeWithExtraArgs(ht.Bob, args)
+ ht.RestartNodeWithExtraArgs(bob, args)
// Before we start the test, we'll ensure both sides are connected so
// the funding flow can be properly executed.
- alice := ht.Alice
- bob := ht.Bob
ht.EnsureConnected(alice, bob)
// At this point, we can begin our PSBT channel funding workflow. We'll
@@ -1696,9 +1665,6 @@ func testPsbtChanFundingWithUnstableUtxos(ht *lntest.HarnessTest) {
// Make sure Carol sees her to_remote output from the force close tx.
ht.AssertNumPendingSweeps(carol, 1)
- // Mine one block to trigger the sweep transaction.
- ht.MineEmptyBlocks(1)
-
// We wait for the to_remote sweep tx.
ht.AssertNumUTXOsUnconfirmed(carol, 1)
@@ -1821,9 +1787,6 @@ func testPsbtChanFundingWithUnstableUtxos(ht *lntest.HarnessTest) {
// Make sure Carol sees her to_remote output from the force close tx.
ht.AssertNumPendingSweeps(carol, 1)
- // Mine one block to trigger the sweep transaction.
- ht.MineEmptyBlocks(1)
-
// We wait for the to_remote sweep tx of channelPoint2.
utxos := ht.AssertNumUTXOsUnconfirmed(carol, 1)
diff --git a/itest/lnd_remote_signer_test.go b/itest/lnd_remote_signer_test.go
index e18e5cb039..6cbda365aa 100644
--- a/itest/lnd_remote_signer_test.go
+++ b/itest/lnd_remote_signer_test.go
@@ -16,6 +16,59 @@ import (
"github.com/stretchr/testify/require"
)
+// remoteSignerTestCases defines a set of test cases to run against the remote
+// signer.
+var remoteSignerTestCases = []*lntest.TestCase{
+ {
+ Name: "remote signer random seed",
+ TestFunc: testRemoteSignerRadomSeed,
+ },
+ {
+ Name: "remote signer account import",
+ TestFunc: testRemoteSignerAccountImport,
+ },
+ {
+ Name: "remote signer channel open",
+ TestFunc: testRemoteSignerChannelOpen,
+ },
+ {
+ Name: "remote signer funding input types",
+ TestFunc: testRemoteSignerChannelFundingInputTypes,
+ },
+ {
+ Name: "remote signer funding async payments",
+ TestFunc: testRemoteSignerAsyncPayments,
+ },
+ {
+ Name: "remote signer funding async payments taproot",
+ TestFunc: testRemoteSignerAsyncPaymentsTaproot,
+ },
+ {
+ Name: "remote signer shared key",
+ TestFunc: testRemoteSignerSharedKey,
+ },
+ {
+ Name: "remote signer bump fee",
+ TestFunc: testRemoteSignerBumpFee,
+ },
+ {
+ Name: "remote signer psbt",
+ TestFunc: testRemoteSignerPSBT,
+ },
+ {
+ Name: "remote signer sign output raw",
+ TestFunc: testRemoteSignerSignOutputRaw,
+ },
+ {
+ Name: "remote signer verify msg",
+ TestFunc: testRemoteSignerSignVerifyMsg,
+ },
+ {
+ Name: "remote signer taproot",
+ TestFunc: testRemoteSignerTaproot,
+ },
+}
+
var (
rootKey = "tprv8ZgxMBicQKsPe6jS4vDm2n7s42Q6MpvghUQqMmSKG7bTZvGKtjrcU3" +
"PGzMNG37yzxywrcdvgkwrr8eYXJmbwdvUNVT4Ucv7ris4jvA7BUmg"
@@ -53,25 +106,115 @@ var (
}}
)
-// testRemoteSigner tests that a watch-only wallet can use a remote signing
-// wallet to perform any signing or ECDH operations.
-func testRemoteSigner(ht *lntest.HarnessTest) {
- type testCase struct {
- name string
- randomSeed bool
- sendCoins bool
- commitType lnrpc.CommitmentType
- fn func(tt *lntest.HarnessTest,
- wo, carol *node.HarnessNode)
+// remoteSignerTestCase defines a test case for the remote signer test suite.
+type remoteSignerTestCase struct {
+ name string
+ randomSeed bool
+ sendCoins bool
+ commitType lnrpc.CommitmentType
+ fn func(tt *lntest.HarnessTest, wo, carol *node.HarnessNode)
+}
+
+// prepareRemoteSignerTest prepares a test case for the remote signer test
+// suite by creating three nodes.
+func prepareRemoteSignerTest(ht *lntest.HarnessTest, tc remoteSignerTestCase) (
+ *node.HarnessNode, *node.HarnessNode, *node.HarnessNode) {
+
+ // Signer is our signing node and has the wallet with the full master
+ // private key. We test that we can create the watch-only wallet from
+ // the exported accounts but also from a static key to make sure the
+ // derivation of the account public keys is correct in both cases.
+ password := []byte("itestpassword")
+ var (
+ signerNodePubKey = nodePubKey
+ watchOnlyAccounts = deriveCustomScopeAccounts(ht.T)
+ signer *node.HarnessNode
+ err error
+ )
+ if !tc.randomSeed {
+ signer = ht.RestoreNodeWithSeed(
+ "Signer", nil, password, nil, rootKey, 0, nil,
+ )
+ } else {
+ signer = ht.NewNode("Signer", nil)
+ signerNodePubKey = signer.PubKeyStr
+
+ rpcAccts := signer.RPC.ListAccounts(
+ &walletrpc.ListAccountsRequest{},
+ )
+
+ watchOnlyAccounts, err = walletrpc.AccountsToWatchOnly(
+ rpcAccts.Accounts,
+ )
+ require.NoError(ht, err)
}
- subTests := []testCase{{
+ var commitArgs []string
+ if tc.commitType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
+ commitArgs = lntest.NodeArgsForCommitType(
+ tc.commitType,
+ )
+ }
+
+ // WatchOnly is the node that has a watch-only wallet and uses the
+ // Signer node for any operation that requires access to private keys.
+ watchOnly := ht.NewNodeRemoteSigner(
+ "WatchOnly", append([]string{
+ "--remotesigner.enable",
+ fmt.Sprintf(
+ "--remotesigner.rpchost=localhost:%d",
+ signer.Cfg.RPCPort,
+ ),
+ fmt.Sprintf(
+ "--remotesigner.tlscertpath=%s",
+ signer.Cfg.TLSCertPath,
+ ),
+ fmt.Sprintf(
+ "--remotesigner.macaroonpath=%s",
+ signer.Cfg.AdminMacPath,
+ ),
+ }, commitArgs...),
+ password, &lnrpc.WatchOnly{
+ MasterKeyBirthdayTimestamp: 0,
+ MasterKeyFingerprint: nil,
+ Accounts: watchOnlyAccounts,
+ },
+ )
+
+ resp := watchOnly.RPC.GetInfo()
+ require.Equal(ht, signerNodePubKey, resp.IdentityPubkey)
+
+ if tc.sendCoins {
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, watchOnly)
+ ht.AssertWalletAccountBalance(
+ watchOnly, "default",
+ btcutil.SatoshiPerBitcoin, 0,
+ )
+ }
+
+ carol := ht.NewNode("carol", commitArgs)
+ ht.EnsureConnected(watchOnly, carol)
+
+ return signer, watchOnly, carol
+}
+
+// testRemoteSignerRadomSeed tests that a watch-only wallet can use a remote
+// signing wallet to perform any signing or ECDH operations.
+func testRemoteSignerRadomSeed(ht *lntest.HarnessTest) {
+ tc := remoteSignerTestCase{
name: "random seed",
randomSeed: true,
fn: func(tt *lntest.HarnessTest, wo, carol *node.HarnessNode) {
// Nothing more to test here.
},
- }, {
+ }
+
+ _, watchOnly, carol := prepareRemoteSignerTest(ht, tc)
+ tc.fn(ht, watchOnly, carol)
+}
+
+func testRemoteSignerAccountImport(ht *lntest.HarnessTest) {
+ tc := remoteSignerTestCase{
name: "account import",
fn: func(tt *lntest.HarnessTest, wo, carol *node.HarnessNode) {
runWalletImportAccountScenario(
@@ -79,25 +222,53 @@ func testRemoteSigner(ht *lntest.HarnessTest) {
carol, wo,
)
},
- }, {
+ }
+
+ _, watchOnly, carol := prepareRemoteSignerTest(ht, tc)
+ tc.fn(ht, watchOnly, carol)
+}
+
+func testRemoteSignerChannelOpen(ht *lntest.HarnessTest) {
+ tc := remoteSignerTestCase{
name: "basic channel open close",
sendCoins: true,
fn: func(tt *lntest.HarnessTest, wo, carol *node.HarnessNode) {
runBasicChannelCreationAndUpdates(tt, wo, carol)
},
- }, {
+ }
+
+ _, watchOnly, carol := prepareRemoteSignerTest(ht, tc)
+ tc.fn(ht, watchOnly, carol)
+}
+
+func testRemoteSignerChannelFundingInputTypes(ht *lntest.HarnessTest) {
+ tc := remoteSignerTestCase{
name: "channel funding input types",
sendCoins: false,
fn: func(tt *lntest.HarnessTest, wo, carol *node.HarnessNode) {
runChannelFundingInputTypes(tt, carol, wo)
},
- }, {
+ }
+
+ _, watchOnly, carol := prepareRemoteSignerTest(ht, tc)
+ tc.fn(ht, watchOnly, carol)
+}
+
+func testRemoteSignerAsyncPayments(ht *lntest.HarnessTest) {
+ tc := remoteSignerTestCase{
name: "async payments",
sendCoins: true,
fn: func(tt *lntest.HarnessTest, wo, carol *node.HarnessNode) {
runAsyncPayments(tt, wo, carol, nil)
},
- }, {
+ }
+
+ _, watchOnly, carol := prepareRemoteSignerTest(ht, tc)
+ tc.fn(ht, watchOnly, carol)
+}
+
+func testRemoteSignerAsyncPaymentsTaproot(ht *lntest.HarnessTest) {
+ tc := remoteSignerTestCase{
name: "async payments taproot",
sendCoins: true,
fn: func(tt *lntest.HarnessTest, wo, carol *node.HarnessNode) {
@@ -108,22 +279,43 @@ func testRemoteSigner(ht *lntest.HarnessTest) {
)
},
commitType: lnrpc.CommitmentType_SIMPLE_TAPROOT,
- }, {
+ }
+
+ _, watchOnly, carol := prepareRemoteSignerTest(ht, tc)
+ tc.fn(ht, watchOnly, carol)
+}
+
+func testRemoteSignerSharedKey(ht *lntest.HarnessTest) {
+ tc := remoteSignerTestCase{
name: "shared key",
fn: func(tt *lntest.HarnessTest, wo, carol *node.HarnessNode) {
runDeriveSharedKey(tt, wo)
},
- }, {
+ }
+
+ _, watchOnly, carol := prepareRemoteSignerTest(ht, tc)
+ tc.fn(ht, watchOnly, carol)
+}
+
+func testRemoteSignerBumpFee(ht *lntest.HarnessTest) {
+ tc := remoteSignerTestCase{
name: "bumpfee",
sendCoins: true,
fn: func(tt *lntest.HarnessTest, wo, carol *node.HarnessNode) {
runBumpFee(tt, wo)
},
- }, {
+ }
+
+ _, watchOnly, carol := prepareRemoteSignerTest(ht, tc)
+ tc.fn(ht, watchOnly, carol)
+}
+
+func testRemoteSignerPSBT(ht *lntest.HarnessTest) {
+ tc := remoteSignerTestCase{
name: "psbt",
randomSeed: true,
fn: func(tt *lntest.HarnessTest, wo, carol *node.HarnessNode) {
- runPsbtChanFunding(
+ runPsbtChanFundingWithNodes(
tt, carol, wo, false,
lnrpc.CommitmentType_LEGACY,
)
@@ -137,19 +329,40 @@ func testRemoteSigner(ht *lntest.HarnessTest) {
// sure we can fund and then sign PSBTs from our wallet.
runFundAndSignPsbt(ht, wo)
},
- }, {
+ }
+
+ _, watchOnly, carol := prepareRemoteSignerTest(ht, tc)
+ tc.fn(ht, watchOnly, carol)
+}
+
+func testRemoteSignerSignOutputRaw(ht *lntest.HarnessTest) {
+ tc := remoteSignerTestCase{
name: "sign output raw",
sendCoins: true,
fn: func(tt *lntest.HarnessTest, wo, carol *node.HarnessNode) {
runSignOutputRaw(tt, wo)
},
- }, {
+ }
+
+ _, watchOnly, carol := prepareRemoteSignerTest(ht, tc)
+ tc.fn(ht, watchOnly, carol)
+}
+
+func testRemoteSignerSignVerifyMsg(ht *lntest.HarnessTest) {
+ tc := remoteSignerTestCase{
name: "sign verify msg",
sendCoins: true,
fn: func(tt *lntest.HarnessTest, wo, carol *node.HarnessNode) {
runSignVerifyMessage(tt, wo)
},
- }, {
+ }
+
+ _, watchOnly, carol := prepareRemoteSignerTest(ht, tc)
+ tc.fn(ht, watchOnly, carol)
+}
+
+func testRemoteSignerTaproot(ht *lntest.HarnessTest) {
+ tc := remoteSignerTestCase{
name: "taproot",
sendCoins: true,
randomSeed: true,
@@ -175,107 +388,10 @@ func testRemoteSigner(ht *lntest.HarnessTest) {
)
}
},
- }}
-
- prepareTest := func(st *lntest.HarnessTest,
- subTest testCase) (*node.HarnessNode,
- *node.HarnessNode, *node.HarnessNode) {
-
- // Signer is our signing node and has the wallet with the full
- // master private key. We test that we can create the watch-only
- // wallet from the exported accounts but also from a static key
- // to make sure the derivation of the account public keys is
- // correct in both cases.
- password := []byte("itestpassword")
- var (
- signerNodePubKey = nodePubKey
- watchOnlyAccounts = deriveCustomScopeAccounts(ht.T)
- signer *node.HarnessNode
- err error
- )
- if !subTest.randomSeed {
- signer = st.RestoreNodeWithSeed(
- "Signer", nil, password, nil, rootKey, 0, nil,
- )
- } else {
- signer = st.NewNode("Signer", nil)
- signerNodePubKey = signer.PubKeyStr
-
- rpcAccts := signer.RPC.ListAccounts(
- &walletrpc.ListAccountsRequest{},
- )
-
- watchOnlyAccounts, err = walletrpc.AccountsToWatchOnly(
- rpcAccts.Accounts,
- )
- require.NoError(st, err)
- }
-
- var commitArgs []string
- if subTest.commitType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
- commitArgs = lntest.NodeArgsForCommitType(
- subTest.commitType,
- )
- }
-
- // WatchOnly is the node that has a watch-only wallet and uses
- // the Signer node for any operation that requires access to
- // private keys.
- watchOnly := st.NewNodeRemoteSigner(
- "WatchOnly", append([]string{
- "--remotesigner.enable",
- fmt.Sprintf(
- "--remotesigner.rpchost=localhost:%d",
- signer.Cfg.RPCPort,
- ),
- fmt.Sprintf(
- "--remotesigner.tlscertpath=%s",
- signer.Cfg.TLSCertPath,
- ),
- fmt.Sprintf(
- "--remotesigner.macaroonpath=%s",
- signer.Cfg.AdminMacPath,
- ),
- }, commitArgs...),
- password, &lnrpc.WatchOnly{
- MasterKeyBirthdayTimestamp: 0,
- MasterKeyFingerprint: nil,
- Accounts: watchOnlyAccounts,
- },
- )
-
- resp := watchOnly.RPC.GetInfo()
- require.Equal(st, signerNodePubKey, resp.IdentityPubkey)
-
- if subTest.sendCoins {
- st.FundCoins(btcutil.SatoshiPerBitcoin, watchOnly)
- ht.AssertWalletAccountBalance(
- watchOnly, "default",
- btcutil.SatoshiPerBitcoin, 0,
- )
- }
-
- carol := st.NewNode("carol", commitArgs)
- st.EnsureConnected(watchOnly, carol)
-
- return signer, watchOnly, carol
}
- for _, testCase := range subTests {
- subTest := testCase
-
- success := ht.Run(subTest.name, func(tt *testing.T) {
- // Skip the cleanup here as no standby node is used.
- st := ht.Subtest(tt)
-
- _, watchOnly, carol := prepareTest(st, subTest)
- subTest.fn(st, watchOnly, carol)
- })
-
- if !success {
- return
- }
- }
+ _, watchOnly, carol := prepareRemoteSignerTest(ht, tc)
+ tc.fn(ht, watchOnly, carol)
}
// deriveCustomScopeAccounts derives the first 255 default accounts of the custom lnd
diff --git a/itest/lnd_res_handoff_test.go b/itest/lnd_res_handoff_test.go
index dbf286293b..d7bf499905 100644
--- a/itest/lnd_res_handoff_test.go
+++ b/itest/lnd_res_handoff_test.go
@@ -17,7 +17,8 @@ func testResHandoff(ht *lntest.HarnessTest) {
paymentAmt = 50000
)
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
// First we'll create a channel between Alice and Bob.
ht.EnsureConnected(alice, bob)
@@ -93,6 +94,4 @@ func testResHandoff(ht *lntest.HarnessTest) {
// Assert that Alice's payment failed.
ht.AssertFirstHTLCError(alice, lnrpc.Failure_PERMANENT_CHANNEL_FAILURE)
-
- ht.CloseChannel(alice, chanPointAlice)
}
diff --git a/itest/lnd_rest_api_test.go b/itest/lnd_rest_api_test.go
index ce2884e776..64a87778f7 100644
--- a/itest/lnd_rest_api_test.go
+++ b/itest/lnd_rest_api_test.go
@@ -212,13 +212,13 @@ func testRestAPI(ht *lntest.HarnessTest) {
// Make sure Alice allows all CORS origins. Bob will keep the default.
// We also make sure the ping/pong messages are sent very often, so we
// can test them without waiting half a minute.
- alice, bob := ht.Alice, ht.Bob
- alice.Cfg.ExtraArgs = append(
- alice.Cfg.ExtraArgs, "--restcors=\"*\"",
+ bob := ht.NewNode("Bob", nil)
+ args := []string{
+ "--restcors=\"*\"",
fmt.Sprintf("--ws-ping-interval=%s", pingInterval),
fmt.Sprintf("--ws-pong-wait=%s", pongWait),
- )
- ht.RestartNode(alice)
+ }
+ alice := ht.NewNodeWithCoins("Alice", args)
for _, tc := range testCases {
tc := tc
@@ -237,6 +237,8 @@ func testRestAPI(ht *lntest.HarnessTest) {
}
func wsTestCaseSubscription(ht *lntest.HarnessTest) {
+ alice := ht.NewNode("Alice", nil)
+
// Find out the current best block so we can subscribe to the next one.
hash, height := ht.GetBestBlock()
@@ -246,7 +248,7 @@ func wsTestCaseSubscription(ht *lntest.HarnessTest) {
Height: uint32(height),
}
url := "/v2/chainnotifier/register/blocks"
- c, err := openWebSocket(ht.Alice, url, "POST", req, nil)
+ c, err := openWebSocket(alice, url, "POST", req, nil)
require.NoError(ht, err, "websocket")
defer func() {
err := c.WriteMessage(websocket.CloseMessage, closeMsg)
@@ -326,7 +328,7 @@ func wsTestCaseSubscriptionMacaroon(ht *lntest.HarnessTest) {
// This time we send the macaroon in the special header
// Sec-Websocket-Protocol which is the only header field available to
// browsers when opening a WebSocket.
- alice := ht.Alice
+ alice := ht.NewNode("Alice", nil)
mac, err := alice.ReadMacaroon(
alice.Cfg.AdminMacPath, defaultTimeout,
)
@@ -411,7 +413,7 @@ func wsTestCaseBiDirectionalSubscription(ht *lntest.HarnessTest) {
// This time we send the macaroon in the special header
// Sec-Websocket-Protocol which is the only header field available to
// browsers when opening a WebSocket.
- alice := ht.Alice
+ alice := ht.NewNode("Alice", nil)
mac, err := alice.ReadMacaroon(
alice.Cfg.AdminMacPath, defaultTimeout,
)
@@ -438,7 +440,6 @@ func wsTestCaseBiDirectionalSubscription(ht *lntest.HarnessTest) {
msgChan := make(chan *lnrpc.ChannelAcceptResponse, 1)
errChan := make(chan error)
done := make(chan struct{})
- timeout := time.After(defaultTimeout)
// We want to read messages over and over again. We just accept any
// channels that are opened.
@@ -504,6 +505,7 @@ func wsTestCaseBiDirectionalSubscription(ht *lntest.HarnessTest) {
}
return
}
+ ht.Log("Finish writing message")
// Also send the message on our message channel to make
// sure we count it as successful.
@@ -520,27 +522,30 @@ func wsTestCaseBiDirectionalSubscription(ht *lntest.HarnessTest) {
// Before we start opening channels, make sure the two nodes are
// connected.
- bob := ht.Bob
+ bob := ht.NewNodeWithCoins("Bob", nil)
ht.EnsureConnected(alice, bob)
- // Open 3 channels to make sure multiple requests and responses can be
- // sent over the web socket.
- const numChannels = 3
- for i := 0; i < numChannels; i++ {
- chanPoint := ht.OpenChannel(
- bob, alice, lntest.OpenChannelParams{Amt: 500000},
- )
- defer ht.CloseChannel(bob, chanPoint)
-
+ assertMsgReceived := func() {
select {
case <-msgChan:
case err := <-errChan:
ht.Fatalf("Received error from WS: %v", err)
- case <-timeout:
+ case <-time.After(defaultTimeout):
ht.Fatalf("Timeout before message was received")
}
}
+
+ // Open 3 channels to make sure multiple requests and responses can be
+ // sent over the web socket.
+ ht.OpenChannel(bob, alice, lntest.OpenChannelParams{Amt: 500000})
+ assertMsgReceived()
+
+ ht.OpenChannel(bob, alice, lntest.OpenChannelParams{Amt: 500000})
+ assertMsgReceived()
+
+ ht.OpenChannel(bob, alice, lntest.OpenChannelParams{Amt: 500000})
+ assertMsgReceived()
}
func wsTestPingPongTimeout(ht *lntest.HarnessTest) {
@@ -552,7 +557,7 @@ func wsTestPingPongTimeout(ht *lntest.HarnessTest) {
// This time we send the macaroon in the special header
// Sec-Websocket-Protocol which is the only header field available to
// browsers when opening a WebSocket.
- alice := ht.Alice
+ alice := ht.NewNode("Alice", nil)
mac, err := alice.ReadMacaroon(
alice.Cfg.AdminMacPath, defaultTimeout,
)
diff --git a/itest/lnd_revocation_test.go b/itest/lnd_revocation_test.go
index 8e2638fe98..5074a3f02d 100644
--- a/itest/lnd_revocation_test.go
+++ b/itest/lnd_revocation_test.go
@@ -612,7 +612,7 @@ func revokedCloseRetributionRemoteHodlCase(ht *lntest.HarnessTest,
// transactions will be in the mempool at this point, we pass 0
// as the last argument, indicating we don't care what's in the
// mempool.
- ht.MineBlocks(1)
+ ht.MineEmptyBlocks(1)
err = wait.NoError(func() error {
txid, err := findJusticeTx()
if err != nil {
diff --git a/itest/lnd_route_blinding_test.go b/itest/lnd_route_blinding_test.go
index fd378a25f7..4b6f98d18e 100644
--- a/itest/lnd_route_blinding_test.go
+++ b/itest/lnd_route_blinding_test.go
@@ -5,6 +5,7 @@ import (
"crypto/sha256"
"encoding/hex"
"errors"
+ "fmt"
"time"
"github.com/btcsuite/btcd/btcec/v2"
@@ -19,6 +20,10 @@ import (
"github.com/stretchr/testify/require"
)
+// toLocalCSV is the CSV delay for the node's to_local output. We use a small
+// value to save us from mining blocks.
+var toLocalCSV = 2
+
// testQueryBlindedRoutes tests querying routes to blinded routes. To do this,
// it sets up a nework of Alice - Bob - Carol and creates a mock blinded route
// that uses Carol as the introduction node (plus dummy hops to cover multiple
@@ -26,11 +31,9 @@ import (
// expected. It also includes the edge case of a single-hop blinded route,
// which indicates that the introduction node is the recipient.
func testQueryBlindedRoutes(ht *lntest.HarnessTest) {
- var (
- // Convenience aliases.
- alice = ht.Alice
- bob = ht.Bob
- )
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
+ ht.EnsureConnected(alice, bob)
// Setup a two hop channel network: Alice -- Bob -- Carol.
// We set our proportional fee for these channels to zero, so that
@@ -311,13 +314,12 @@ func testQueryBlindedRoutes(ht *lntest.HarnessTest) {
require.Len(ht, resp.Routes, 1)
require.Len(ht, resp.Routes[0].Hops, 2)
require.Equal(ht, resp.Routes[0].TotalTimeLock, sendToIntroTimelock)
-
- ht.CloseChannel(alice, chanPointAliceBob)
- ht.CloseChannel(bob, chanPointBobCarol)
}
type blindedForwardTest struct {
ht *lntest.HarnessTest
+ alice *node.HarnessNode
+ bob *node.HarnessNode
carol *node.HarnessNode
dave *node.HarnessNode
channels []*lnrpc.ChannelPoint
@@ -349,11 +351,28 @@ func newBlindedForwardTest(ht *lntest.HarnessTest) (context.Context,
func (b *blindedForwardTest) setupNetwork(ctx context.Context,
withInterceptor bool) {
- carolArgs := []string{"--bitcoin.timelockdelta=18"}
+ carolArgs := []string{
+ "--bitcoin.timelockdelta=18",
+ fmt.Sprintf("--bitcoin.defaultremotedelay=%v", toLocalCSV),
+ }
if withInterceptor {
carolArgs = append(carolArgs, "--requireinterceptor")
}
- b.carol = b.ht.NewNode("Carol", carolArgs)
+
+ daveArgs := []string{
+ "--bitcoin.timelockdelta=18",
+ fmt.Sprintf("--bitcoin.defaultremotedelay=%v", toLocalCSV),
+ }
+ cfgs := [][]string{nil, nil, carolArgs, daveArgs}
+ param := lntest.OpenChannelParams{
+ Amt: chanAmt,
+ }
+
+ // Creates a network with the following topology and liquidity:
+ // Alice (100k)----- Bob (100k) ----- Carol (100k) ----- Dave
+ chanPoints, nodes := b.ht.CreateSimpleNetwork(cfgs, param)
+ b.channels = chanPoints
+ b.alice, b.bob, b.carol, b.dave = nodes[0], nodes[1], nodes[2], nodes[3]
if withInterceptor {
var err error
@@ -362,10 +381,6 @@ func (b *blindedForwardTest) setupNetwork(ctx context.Context,
)
require.NoError(b.ht, err, "interceptor")
}
-
- b.dave = b.ht.NewNode("Dave", []string{"--bitcoin.timelockdelta=18"})
-
- b.channels = setupFourHopNetwork(b.ht, b.carol, b.dave)
}
// buildBlindedPath returns a blinded route from Bob -> Carol -> Dave, with Bob
@@ -395,7 +410,7 @@ func (b *blindedForwardTest) buildBlindedPath() *lnrpc.BlindedPaymentPath {
require.Len(b.ht, payReq.BlindedPaths, 1)
path := payReq.BlindedPaths[0].BlindedPath
require.Len(b.ht, path.BlindedHops, 3)
- require.EqualValues(b.ht, path.IntroductionNode, b.ht.Bob.PubKey[:])
+ require.EqualValues(b.ht, path.IntroductionNode, b.bob.PubKey[:])
return payReq.BlindedPaths[0]
}
@@ -403,10 +418,6 @@ func (b *blindedForwardTest) buildBlindedPath() *lnrpc.BlindedPaymentPath {
// cleanup tears down all channels created by the test and cancels the top
// level context used in the test.
func (b *blindedForwardTest) cleanup() {
- b.ht.CloseChannel(b.ht.Alice, b.channels[0])
- b.ht.CloseChannel(b.ht.Bob, b.channels[1])
- b.ht.CloseChannel(b.carol, b.channels[2])
-
b.cancel()
}
@@ -431,7 +442,7 @@ func (b *blindedForwardTest) createRouteToBlinded(paymentAmt int64,
},
}
- resp := b.ht.Alice.RPC.QueryRoutes(req)
+ resp := b.alice.RPC.QueryRoutes(req)
require.Greater(b.ht, len(resp.Routes), 0, "no routes")
require.Len(b.ht, resp.Routes[0].Hops, 3, "unexpected route length")
@@ -452,7 +463,7 @@ func (b *blindedForwardTest) sendBlindedPayment(ctx context.Context,
ctx, cancel := context.WithTimeout(ctx, time.Hour)
go func() {
- _, err := b.ht.Alice.RPC.Router.SendToRouteV2(ctx, sendReq)
+ _, err := b.alice.RPC.Router.SendToRouteV2(ctx, sendReq)
// We may get a context canceled error when the test is
// finished.
@@ -481,7 +492,7 @@ func (b *blindedForwardTest) sendToRoute(route *lnrpc.Route,
// Let Alice send to the blinded payment path and assert that it
// succeeds/fails.
- htlcAttempt := b.ht.Alice.RPC.SendToRouteV2(sendReq)
+ htlcAttempt := b.alice.RPC.SendToRouteV2(sendReq)
if assertSuccess {
require.Nil(b.ht, htlcAttempt.Failure)
require.Equal(b.ht, htlcAttempt.Status,
@@ -498,7 +509,7 @@ func (b *blindedForwardTest) sendToRoute(route *lnrpc.Route,
require.NoError(b.ht, err)
pmt := b.ht.AssertPaymentStatus(
- b.ht.Alice, preimage, lnrpc.Payment_FAILED,
+ b.alice, preimage, lnrpc.Payment_FAILED,
)
require.Len(b.ht, pmt.Htlcs, 1)
@@ -520,7 +531,7 @@ func (b *blindedForwardTest) drainCarolLiquidity(incoming bool) {
receivingNode := b.dave
if incoming {
- sendingNode = b.ht.Bob
+ sendingNode = b.bob
receivingNode = b.carol
}
@@ -548,62 +559,6 @@ func (b *blindedForwardTest) drainCarolLiquidity(incoming bool) {
b.ht.AssertPaymentStatusFromStream(pmtClient, lnrpc.Payment_SUCCEEDED)
}
-// setupFourHopNetwork creates a network with the following topology and
-// liquidity:
-// Alice (100k)----- Bob (100k) ----- Carol (100k) ----- Dave
-//
-// The funding outpoint for AB / BC / CD are returned in-order.
-func setupFourHopNetwork(ht *lntest.HarnessTest,
- carol, dave *node.HarnessNode) []*lnrpc.ChannelPoint {
-
- const chanAmt = btcutil.Amount(100000)
- var networkChans []*lnrpc.ChannelPoint
-
- // Open a channel with 100k satoshis between Alice and Bob with Alice
- // being the sole funder of the channel.
- chanPointAlice := ht.OpenChannel(
- ht.Alice, ht.Bob, lntest.OpenChannelParams{
- Amt: chanAmt,
- },
- )
- networkChans = append(networkChans, chanPointAlice)
-
- // Create a channel between bob and carol.
- ht.EnsureConnected(ht.Bob, carol)
- chanPointBob := ht.OpenChannel(
- ht.Bob, carol, lntest.OpenChannelParams{
- Amt: chanAmt,
- },
- )
- networkChans = append(networkChans, chanPointBob)
-
- // Fund carol and connect her and dave so that she can create a channel
- // between them.
- ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
- ht.EnsureConnected(carol, dave)
-
- chanPointCarol := ht.OpenChannel(
- carol, dave, lntest.OpenChannelParams{
- Amt: chanAmt,
- },
- )
- networkChans = append(networkChans, chanPointCarol)
-
- // Wait for all nodes to have seen all channels.
- nodes := []*node.HarnessNode{ht.Alice, ht.Bob, carol, dave}
- for _, chanPoint := range networkChans {
- for _, node := range nodes {
- ht.AssertChannelInGraph(node, chanPoint)
- }
- }
-
- return []*lnrpc.ChannelPoint{
- chanPointAlice,
- chanPointBob,
- chanPointCarol,
- }
-}
-
// testBlindedRouteInvoices tests lnd's ability to create a blinded payment path
// which it then inserts into an invoice, sending to an invoice with a blinded
// path and forward payments in a blinded route and finally, receiving the
@@ -616,6 +571,8 @@ func testBlindedRouteInvoices(ht *lntest.HarnessTest) {
// blinded path that uses Bob as an introduction node.
testCase.setupNetwork(ctx, false)
+ alice := testCase.alice
+
// Let Dave add a blinded invoice.
// Add restrictions so that he only ever creates a single blinded path
// from Bob to himself.
@@ -634,7 +591,7 @@ func testBlindedRouteInvoices(ht *lntest.HarnessTest) {
})
// Now let Alice pay the invoice.
- ht.CompletePaymentRequests(ht.Alice, []string{invoice.PaymentRequest})
+ ht.CompletePaymentRequests(alice, []string{invoice.PaymentRequest})
// Let Dave add a blinded invoice.
// Once again let Dave create a blinded invoice.
@@ -661,7 +618,7 @@ func testBlindedRouteInvoices(ht *lntest.HarnessTest) {
require.EqualValues(ht, path.IntroductionNode, testCase.dave.PubKey[:])
// Now let Alice pay the invoice.
- ht.CompletePaymentRequests(ht.Alice, []string{invoice.PaymentRequest})
+ ht.CompletePaymentRequests(alice, []string{invoice.PaymentRequest})
}
// testReceiverBlindedError tests handling of errors from the receiving node in
@@ -739,6 +696,8 @@ func testIntroductionNodeError(ht *lntest.HarnessTest) {
blindedPaymentPath := testCase.buildBlindedPath()
route := testCase.createRouteToBlinded(10_000_000, blindedPaymentPath)
+ bob := testCase.bob
+
// Before we send our payment, drain all of Carol's incoming liquidity
// so that she can't receive the forward from Bob, causing a failure
// at the introduction node.
@@ -746,7 +705,7 @@ func testIntroductionNodeError(ht *lntest.HarnessTest) {
// Subscribe to Bob's HTLC events so that we can observe the payment
// coming in.
- bobEvents := ht.Bob.RPC.SubscribeHtlcEvents()
+ bobEvents := bob.RPC.SubscribeHtlcEvents()
// Once subscribed, the first event will be UNKNOWN.
ht.AssertHtlcEventType(bobEvents, routerrpc.HtlcEvent_UNKNOWN)
@@ -773,11 +732,13 @@ func testDisableIntroductionNode(ht *lntest.HarnessTest) {
blindedPaymentPath := testCase.buildBlindedPath()
route := testCase.createRouteToBlinded(10_000_000, blindedPaymentPath)
+ alice, bob := testCase.alice, testCase.bob
+
// Now, disable route blinding for Bob, then re-connect to Alice.
- ht.RestartNodeWithExtraArgs(ht.Bob, []string{
+ ht.RestartNodeWithExtraArgs(bob, []string{
"--protocol.no-route-blinding",
})
- ht.EnsureConnected(ht.Alice, ht.Bob)
+ ht.EnsureConnected(alice, bob)
// Assert that this fails.
testCase.sendToRoute(route, false)
@@ -801,14 +762,16 @@ func testErrorHandlingOnChainFailure(ht *lntest.HarnessTest) {
50_000_000, blindedPaymentPath,
)
+ alice, bob := testCase.alice, testCase.bob
+
// Once our interceptor is set up, we can send the blinded payment.
cancelPmt := testCase.sendBlindedPayment(ctx, blindedRoute)
defer cancelPmt()
// Wait for the HTLC to be active on Alice and Bob's channels.
hash := sha256.Sum256(testCase.preimage[:])
- ht.AssertOutgoingHTLCActive(ht.Alice, testCase.channels[0], hash[:])
- ht.AssertOutgoingHTLCActive(ht.Bob, testCase.channels[1], hash[:])
+ ht.AssertOutgoingHTLCActive(alice, testCase.channels[0], hash[:])
+ ht.AssertOutgoingHTLCActive(bob, testCase.channels[1], hash[:])
// Intercept the forward on Carol's link, but do not take any action
// so that we have the chance to force close with this HTLC in flight.
@@ -817,46 +780,47 @@ func testErrorHandlingOnChainFailure(ht *lntest.HarnessTest) {
// Force close Bob <-> Carol.
closeStream, _ := ht.CloseChannelAssertPending(
- ht.Bob, testCase.channels[1], true,
+ bob, testCase.channels[1], true,
)
ht.AssertStreamChannelForceClosed(
- ht.Bob, testCase.channels[1], false, closeStream,
+ bob, testCase.channels[1], false, closeStream,
)
// SuspendCarol so that she can't interfere with the resolution of the
// HTLC from now on.
- restartCarol := ht.SuspendNode(testCase.carol)
+ ht.SuspendNode(testCase.carol)
// Mine blocks so that Bob will claim his CSV delayed local commitment,
// we've already mined 1 block so we need one less than our CSV.
- ht.MineBlocks(node.DefaultCSV - 1)
- ht.AssertNumPendingSweeps(ht.Bob, 1)
- ht.MineEmptyBlocks(1)
+ ht.MineBlocks(toLocalCSV - 1)
+ ht.AssertNumPendingSweeps(bob, 1)
ht.MineBlocksAndAssertNumTxes(1, 1)
// Restart bob so that we can test that he's able to recover everything
// he needs to claim a blinded HTLC.
- ht.RestartNode(ht.Bob)
+ ht.RestartNode(bob)
// Mine enough blocks for Bob to trigger timeout of his outgoing HTLC.
// Carol's incoming expiry height is Bob's outgoing so we can use this
// value.
- info := ht.Bob.RPC.GetInfo()
+ info := bob.RPC.GetInfo()
target := carolHTLC.IncomingExpiry - info.BlockHeight
+ ht.Log(carolHTLC.IncomingExpiry, info.BlockHeight, target)
ht.MineBlocks(int(target))
// Wait for Bob's timeout transaction in the mempool, since we've
// suspended Carol we don't need to account for her commitment output
// claim.
- ht.AssertNumPendingSweeps(ht.Bob, 0)
+ ht.AssertNumPendingSweeps(bob, 0)
ht.MineBlocksAndAssertNumTxes(1, 1)
- ht.AssertHTLCNotActive(ht.Bob, testCase.channels[0], hash[:])
- ht.AssertHTLCNotActive(ht.Alice, testCase.channels[0], hash[:])
+ // Assert that the HTLC has cleared.
+ ht.AssertHTLCNotActive(bob, testCase.channels[0], hash[:])
+ ht.AssertHTLCNotActive(alice, testCase.channels[0], hash[:])
// Wait for the HTLC to reflect as failed for Alice.
- paymentStream := ht.Alice.RPC.TrackPaymentV2(hash[:])
+ paymentStream := alice.RPC.TrackPaymentV2(hash[:])
htlcs := ht.ReceiveTrackPayment(paymentStream).Htlcs
require.Len(ht, htlcs, 1)
require.NotNil(ht, htlcs[0].Failure)
@@ -865,27 +829,9 @@ func testErrorHandlingOnChainFailure(ht *lntest.HarnessTest) {
lnrpc.Failure_INVALID_ONION_BLINDING,
)
- // Clean up the rest of our force close: mine blocks so that Bob's CSV
- // expires plus one block to trigger his sweep and then mine it.
- ht.MineBlocks(node.DefaultCSV + 1)
- ht.MineBlocksAndAssertNumTxes(1, 1)
-
- // Bring carol back up so that we can close out the rest of our
- // channels cooperatively. She requires an interceptor to start up
- // so we just re-register our interceptor.
- require.NoError(ht, restartCarol())
- _, err = testCase.carol.RPC.Router.HtlcInterceptor(ctx)
- require.NoError(ht, err, "interceptor")
-
- // Assert that Carol has started up and reconnected to dave so that
- // we can close out channels cooperatively.
- ht.EnsureConnected(testCase.carol, testCase.dave)
-
// Manually close out the rest of our channels and cancel (don't use
// built in cleanup which will try close the already-force-closed
// channel).
- ht.CloseChannel(ht.Alice, testCase.channels[0])
- ht.CloseChannel(testCase.carol, testCase.channels[2])
testCase.cancel()
}
@@ -906,8 +852,8 @@ func testErrorHandlingOnChainFailure(ht *lntest.HarnessTest) {
func testMPPToSingleBlindedPath(ht *lntest.HarnessTest) {
// Create a five-node context consisting of Alice, Bob and three new
// nodes.
- alice, bob := ht.Alice, ht.Bob
-
+ alice := ht.NewNode("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
dave := ht.NewNode("dave", nil)
carol := ht.NewNode("carol", nil)
eve := ht.NewNode("eve", nil)
@@ -921,10 +867,12 @@ func testMPPToSingleBlindedPath(ht *lntest.HarnessTest) {
// Send coins to the nodes and mine 1 blocks to confirm them.
for i := 0; i < 2; i++ {
+ ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, alice)
+ ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, bob)
ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, carol)
ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, dave)
ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, eve)
- ht.MineBlocksAndAssertNumTxes(1, 3)
+ ht.MineBlocksAndAssertNumTxes(1, 5)
}
const paymentAmt = btcutil.Amount(300000)
@@ -985,7 +933,7 @@ func testMPPToSingleBlindedPath(ht *lntest.HarnessTest) {
}
// Each node should have exactly numPublic edges.
- ht.AssertNumActiveEdges(hn, numPublic, false)
+ ht.AssertNumEdges(hn, numPublic, false)
}
// Make Dave create an invoice with a blinded path for Alice to pay.
@@ -1096,11 +1044,11 @@ func testMPPToSingleBlindedPath(ht *lntest.HarnessTest) {
// between him and the introduction node. So we expect that Carol is chosen as
// the intro node and that one dummy hops is appended.
func testBlindedRouteDummyHops(ht *lntest.HarnessTest) {
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
// Disable route blinding for Bob so that he is never chosen as the
// introduction node.
- ht.RestartNodeWithExtraArgs(bob, []string{
+ bob := ht.NewNodeWithCoins("Bob", []string{
"--protocol.no-route-blinding",
})
@@ -1156,7 +1104,7 @@ func testBlindedRouteDummyHops(ht *lntest.HarnessTest) {
}
// Each node should have exactly 5 edges.
- ht.AssertNumActiveEdges(hn, len(channelPoints), false)
+ ht.AssertNumEdges(hn, len(channelPoints), false)
}
// Make Dave create an invoice with a blinded path for Alice to pay.
@@ -1190,7 +1138,7 @@ func testBlindedRouteDummyHops(ht *lntest.HarnessTest) {
// Now let Alice pay the invoice.
ht.CompletePaymentRequests(
- ht.Alice, []string{invoiceResp.PaymentRequest},
+ alice, []string{invoiceResp.PaymentRequest},
)
// Make sure Dave show the invoice as settled.
@@ -1232,7 +1180,7 @@ func testBlindedRouteDummyHops(ht *lntest.HarnessTest) {
// Now let Alice pay the invoice.
ht.CompletePaymentRequests(
- ht.Alice, []string{invoiceResp.PaymentRequest},
+ alice, []string{invoiceResp.PaymentRequest},
)
// Make sure Dave show the invoice as settled.
@@ -1267,7 +1215,8 @@ func testBlindedRouteDummyHops(ht *lntest.HarnessTest) {
// \ /
// --- Carol ---
func testMPPToMultipleBlindedPaths(ht *lntest.HarnessTest) {
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
// Create a four-node context consisting of Alice, Bob and three new
// nodes.
@@ -1325,7 +1274,7 @@ func testMPPToMultipleBlindedPaths(ht *lntest.HarnessTest) {
}
// Each node should have exactly 5 edges.
- ht.AssertNumActiveEdges(hn, len(channelPoints), false)
+ ht.AssertNumEdges(hn, len(channelPoints), false)
}
// Ok now make a payment that must be split to succeed.
@@ -1436,6 +1385,8 @@ func testBlindedPaymentHTLCReForward(ht *lntest.HarnessTest) {
// Set up network with carol interceptor.
testCase.setupNetwork(ctx, true)
+ alice, bob := testCase.alice, testCase.bob
+
// Let dave create invoice.
blindedPaymentPath := testCase.buildBlindedPath()
route := testCase.createRouteToBlinded(10_000_000, blindedPaymentPath)
@@ -1452,7 +1403,7 @@ func testBlindedPaymentHTLCReForward(ht *lntest.HarnessTest) {
go func() {
defer close(done)
- htlcAttempt, err := testCase.ht.Alice.RPC.Router.SendToRouteV2(
+ htlcAttempt, err := testCase.alice.RPC.Router.SendToRouteV2(
ctx, sendReq,
)
require.NoError(testCase.ht, err)
@@ -1463,8 +1414,8 @@ func testBlindedPaymentHTLCReForward(ht *lntest.HarnessTest) {
}()
// Wait for the HTLC to be active on Alice and Bob's channels.
- ht.AssertOutgoingHTLCActive(ht.Alice, testCase.channels[0], hash[:])
- ht.AssertOutgoingHTLCActive(ht.Bob, testCase.channels[1], hash[:])
+ ht.AssertOutgoingHTLCActive(alice, testCase.channels[0], hash[:])
+ ht.AssertOutgoingHTLCActive(bob, testCase.channels[1], hash[:])
// Intercept the forward on Carol's link. At this point, we know she
// has received the HTLC and so will persist this packet.
@@ -1492,7 +1443,7 @@ func testBlindedPaymentHTLCReForward(ht *lntest.HarnessTest) {
// Nodes need to be connected otherwise the forwarding of the
// intercepted htlc will fail.
- ht.EnsureConnected(ht.Bob, testCase.carol)
+ ht.EnsureConnected(bob, testCase.carol)
ht.EnsureConnected(testCase.carol, testCase.dave)
// Now that carol and dave are connected signal the forwarding of the
diff --git a/itest/lnd_routing_test.go b/itest/lnd_routing_test.go
index 07adcf559c..c32e021d44 100644
--- a/itest/lnd_routing_test.go
+++ b/itest/lnd_routing_test.go
@@ -20,46 +20,33 @@ import (
"google.golang.org/protobuf/proto"
)
-type singleHopSendToRouteCase struct {
- name string
-
- // streaming tests streaming SendToRoute if true, otherwise tests
- // synchronous SenToRoute.
- streaming bool
-
- // routerrpc submits the request to the routerrpc subserver if true,
- // otherwise submits to the main rpc server.
- routerrpc bool
-}
-
-var singleHopSendToRouteCases = []singleHopSendToRouteCase{
- {
- name: "regular main sync",
- },
+var sendToRouteTestCases = []*lntest.TestCase{
{
- name: "regular main stream",
- streaming: true,
- },
- {
- name: "regular routerrpc sync",
- routerrpc: true,
- },
- {
- name: "mpp main sync",
+ Name: "single hop send to route sync",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ // useStream: false, routerrpc: false.
+ testSingleHopSendToRouteCase(ht, false, false)
+ },
},
{
- name: "mpp main stream",
- streaming: true,
+ Name: "single hop send to route stream",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ // useStream: true, routerrpc: false.
+ testSingleHopSendToRouteCase(ht, true, false)
+ },
},
{
- name: "mpp routerrpc sync",
- routerrpc: true,
+ Name: "single hop send to route v2",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ // useStream: false, routerrpc: true.
+ testSingleHopSendToRouteCase(ht, false, true)
+ },
},
}
-// testSingleHopSendToRoute tests that payments are properly processed through a
-// provided route with a single hop. We'll create the following network
-// topology:
+// testSingleHopSendToRouteCase tests that payments are properly processed
+// through a provided route with a single hop. We'll create the following
+// network topology:
//
// Carol --100k--> Dave
//
@@ -67,19 +54,8 @@ var singleHopSendToRouteCases = []singleHopSendToRouteCase{
// by feeding the route back into the various SendToRoute RPC methods. Here we
// test all three SendToRoute endpoints, forcing each to perform both a regular
// payment and an MPP payment.
-func testSingleHopSendToRoute(ht *lntest.HarnessTest) {
- for _, test := range singleHopSendToRouteCases {
- test := test
-
- ht.Run(test.name, func(t1 *testing.T) {
- st := ht.Subtest(t1)
- testSingleHopSendToRouteCase(st, test)
- })
- }
-}
-
func testSingleHopSendToRouteCase(ht *lntest.HarnessTest,
- test singleHopSendToRouteCase) {
+ useStream, useRPC bool) {
const chanAmt = btcutil.Amount(100000)
const paymentAmtSat = 1000
@@ -101,7 +77,6 @@ func testSingleHopSendToRouteCase(ht *lntest.HarnessTest,
chanPointCarol := ht.OpenChannel(
carol, dave, lntest.OpenChannelParams{Amt: chanAmt},
)
- defer ht.CloseChannel(carol, chanPointCarol)
// Create invoices for Dave, which expect a payment from Carol.
payReqs, rHashes, _ := ht.CreatePayReqs(
@@ -200,11 +175,11 @@ func testSingleHopSendToRouteCase(ht *lntest.HarnessTest,
// synchronously via the routerrpc's SendToRoute, or via the main RPC
// server's SendToRoute streaming or sync calls.
switch {
- case !test.routerrpc && test.streaming:
+ case !useRPC && useStream:
sendToRouteStream()
- case !test.routerrpc && !test.streaming:
+ case !useRPC && !useStream:
sendToRouteSync()
- case test.routerrpc && !test.streaming:
+ case useRPC && !useStream:
sendToRouteRouterRPC()
default:
require.Fail(ht, "routerrpc does not support "+
@@ -317,9 +292,8 @@ func runMultiHopSendToRoute(ht *lntest.HarnessTest, useGraphCache bool) {
opts = append(opts, "--db.no-graph-cache")
}
- alice, bob := ht.Alice, ht.Bob
- ht.RestartNodeWithExtraArgs(alice, opts)
-
+ alice := ht.NewNodeWithCoins("Alice", opts)
+ bob := ht.NewNodeWithCoins("Bob", opts)
ht.EnsureConnected(alice, bob)
const chanAmt = btcutil.Amount(100000)
@@ -329,7 +303,6 @@ func runMultiHopSendToRoute(ht *lntest.HarnessTest, useGraphCache bool) {
chanPointAlice := ht.OpenChannel(
alice, bob, lntest.OpenChannelParams{Amt: chanAmt},
)
- defer ht.CloseChannel(alice, chanPointAlice)
// Create Carol and establish a channel from Bob. Bob is the sole
// funder of the channel with 100k satoshis. The network topology
@@ -341,7 +314,6 @@ func runMultiHopSendToRoute(ht *lntest.HarnessTest, useGraphCache bool) {
chanPointBob := ht.OpenChannel(
bob, carol, lntest.OpenChannelParams{Amt: chanAmt},
)
- defer ht.CloseChannel(carol, chanPointBob)
// Make sure Alice knows the channel between Bob and Carol.
ht.AssertChannelInGraph(alice, chanPointBob)
@@ -417,10 +389,11 @@ func testSendToRouteErrorPropagation(ht *lntest.HarnessTest) {
// Open a channel with 100k satoshis between Alice and Bob with Alice
// being the sole funder of the channel.
- alice, bob := ht.Alice, ht.Bob
- chanPointAlice := ht.OpenChannel(
- alice, bob, lntest.OpenChannelParams{Amt: chanAmt},
- )
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
+ ht.EnsureConnected(alice, bob)
+
+ ht.OpenChannel(alice, bob, lntest.OpenChannelParams{Amt: chanAmt})
// Create a new nodes (Carol and Charlie), load her with some funds,
// then establish a connection between Carol and Charlie with a channel
@@ -474,8 +447,6 @@ func testSendToRouteErrorPropagation(ht *lntest.HarnessTest) {
require.NoError(ht, err, "payment stream has been closed but fake "+
"route has consumed")
require.Contains(ht, event.PaymentError, "UnknownNextPeer")
-
- ht.CloseChannel(alice, chanPointAlice)
}
// testPrivateChannels tests that a private channel can be used for
@@ -496,7 +467,10 @@ func testPrivateChannels(ht *lntest.HarnessTest) {
// where the 100k channel between Carol and Alice is private.
// Open a channel with 200k satoshis between Alice and Bob.
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNode("Bob", nil)
+ ht.EnsureConnected(alice, bob)
+
chanPointAlice := ht.OpenChannel(
alice, bob, lntest.OpenChannelParams{Amt: chanAmt * 2},
)
@@ -591,18 +565,12 @@ func testPrivateChannels(ht *lntest.HarnessTest) {
// Carol and Alice should know about 4, while Bob and Dave should only
// know about 3, since one channel is private.
- ht.AssertNumActiveEdges(alice, 4, true)
- ht.AssertNumActiveEdges(alice, 3, false)
- ht.AssertNumActiveEdges(bob, 3, true)
- ht.AssertNumActiveEdges(carol, 4, true)
- ht.AssertNumActiveEdges(carol, 3, false)
- ht.AssertNumActiveEdges(dave, 3, true)
-
- // Close all channels.
- ht.CloseChannel(alice, chanPointAlice)
- ht.CloseChannel(dave, chanPointDave)
- ht.CloseChannel(carol, chanPointCarol)
- ht.CloseChannel(carol, chanPointPrivate)
+ ht.AssertNumEdges(alice, 4, true)
+ ht.AssertNumEdges(alice, 3, false)
+ ht.AssertNumEdges(bob, 3, true)
+ ht.AssertNumEdges(carol, 4, true)
+ ht.AssertNumEdges(carol, 3, false)
+ ht.AssertNumEdges(dave, 3, true)
}
// testInvoiceRoutingHints tests that the routing hints for an invoice are
@@ -618,7 +586,10 @@ func testInvoiceRoutingHints(ht *lntest.HarnessTest) {
// throughout this test. We'll include a push amount since we currently
// require channels to have enough remote balance to cover the
// invoice's payment.
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
+ ht.EnsureConnected(alice, bob)
+
chanPointBob := ht.OpenChannel(
alice, bob, lntest.OpenChannelParams{
Amt: chanAmt,
@@ -633,7 +604,7 @@ func testInvoiceRoutingHints(ht *lntest.HarnessTest) {
carol := ht.NewNode("Carol", nil)
ht.ConnectNodes(alice, carol)
- chanPointCarol := ht.OpenChannel(
+ ht.OpenChannel(
alice, carol, lntest.OpenChannelParams{
Amt: chanAmt,
PushAmt: chanAmt / 2,
@@ -646,7 +617,7 @@ func testInvoiceRoutingHints(ht *lntest.HarnessTest) {
// advertised, otherwise we'd end up leaking information about nodes
// that wish to stay unadvertised.
ht.ConnectNodes(bob, carol)
- chanPointBobCarol := ht.OpenChannel(
+ ht.OpenChannel(
bob, carol, lntest.OpenChannelParams{
Amt: chanAmt,
PushAmt: chanAmt / 2,
@@ -660,7 +631,7 @@ func testInvoiceRoutingHints(ht *lntest.HarnessTest) {
dave := ht.NewNode("Dave", nil)
ht.ConnectNodes(alice, dave)
- chanPointDave := ht.OpenChannel(
+ ht.OpenChannel(
alice, dave, lntest.OpenChannelParams{
Amt: chanAmt,
Private: true,
@@ -673,7 +644,7 @@ func testInvoiceRoutingHints(ht *lntest.HarnessTest) {
// inactive channels.
eve := ht.NewNode("Eve", nil)
ht.ConnectNodes(alice, eve)
- chanPointEve := ht.OpenChannel(
+ ht.OpenChannel(
alice, eve, lntest.OpenChannelParams{
Amt: chanAmt,
PushAmt: chanAmt / 2,
@@ -734,22 +705,13 @@ func testInvoiceRoutingHints(ht *lntest.HarnessTest) {
Private: true,
}
checkInvoiceHints(invoice)
-
- // Now that we've confirmed the routing hints were added correctly, we
- // can close all the channels and shut down all the nodes created.
- ht.CloseChannel(alice, chanPointBob)
- ht.CloseChannel(alice, chanPointCarol)
- ht.CloseChannel(bob, chanPointBobCarol)
- ht.CloseChannel(alice, chanPointDave)
-
- // The channel between Alice and Eve should be force closed since Eve
- // is offline.
- ht.ForceCloseChannel(alice, chanPointEve)
}
// testScidAliasRoutingHints tests that dynamically created aliases via the RPC
// are properly used when routing.
func testScidAliasRoutingHints(ht *lntest.HarnessTest) {
+ bob := ht.NewNodeWithCoins("Bob", nil)
+
const chanAmt = btcutil.Amount(800000)
// Option-scid-alias is opt-in, as is anchors.
@@ -866,8 +828,8 @@ func testScidAliasRoutingHints(ht *lntest.HarnessTest) {
})
// Connect the existing Bob node with Carol via a public channel.
- ht.ConnectNodes(ht.Bob, carol)
- chanPointBC := ht.OpenChannel(ht.Bob, carol, lntest.OpenChannelParams{
+ ht.ConnectNodes(bob, carol)
+ ht.OpenChannel(bob, carol, lntest.OpenChannelParams{
Amt: chanAmt,
PushAmt: chanAmt / 2,
})
@@ -902,7 +864,7 @@ func testScidAliasRoutingHints(ht *lntest.HarnessTest) {
// Now Alice will try to pay to that payment request.
timeout := time.Second * 15
- stream := ht.Bob.RPC.SendPayment(&routerrpc.SendPaymentRequest{
+ stream := bob.RPC.SendPayment(&routerrpc.SendPaymentRequest{
PaymentRequest: payReq,
TimeoutSeconds: int32(timeout.Seconds()),
FeeLimitSat: math.MaxInt64,
@@ -924,15 +886,12 @@ func testScidAliasRoutingHints(ht *lntest.HarnessTest) {
AliasMaps: ephemeralAliasMap,
})
payReq2 := dave.RPC.AddInvoice(invoice).PaymentRequest
- stream2 := ht.Bob.RPC.SendPayment(&routerrpc.SendPaymentRequest{
+ stream2 := bob.RPC.SendPayment(&routerrpc.SendPaymentRequest{
PaymentRequest: payReq2,
TimeoutSeconds: int32(timeout.Seconds()),
FeeLimitSat: math.MaxInt64,
})
ht.AssertPaymentStatusFromStream(stream2, lnrpc.Payment_FAILED)
-
- ht.CloseChannel(carol, chanPointCD)
- ht.CloseChannel(ht.Bob, chanPointBC)
}
// testMultiHopOverPrivateChannels tests that private channels can be used as
@@ -946,7 +905,10 @@ func testMultiHopOverPrivateChannels(ht *lntest.HarnessTest) {
// First, we'll open a private channel between Alice and Bob with Alice
// being the funder.
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
+ ht.EnsureConnected(alice, bob)
+
chanPointAlice := ht.OpenChannel(
alice, bob, lntest.OpenChannelParams{
Amt: chanAmt,
@@ -956,7 +918,7 @@ func testMultiHopOverPrivateChannels(ht *lntest.HarnessTest) {
// Next, we'll create Carol's node and open a public channel between
// her and Bob with Bob being the funder.
- carol := ht.NewNode("Carol", nil)
+ carol := ht.NewNodeWithCoins("Carol", nil)
ht.ConnectNodes(bob, carol)
chanPointBob := ht.OpenChannel(
bob, carol, lntest.OpenChannelParams{
@@ -971,7 +933,6 @@ func testMultiHopOverPrivateChannels(ht *lntest.HarnessTest) {
// him and Carol with Carol being the funder.
dave := ht.NewNode("Dave", nil)
ht.ConnectNodes(carol, dave)
- ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
chanPointCarol := ht.OpenChannel(
carol, dave, lntest.OpenChannelParams{
@@ -1030,12 +991,6 @@ func testMultiHopOverPrivateChannels(ht *lntest.HarnessTest) {
// Alice should have sent 20k satoshis + fee for two hops to Bob.
ht.AssertAmountPaid("Alice(local) [private=>] Bob(remote)", alice,
chanPointAlice, paymentAmt+baseFee*2, 0)
-
- // At this point, the payment was successful. We can now close all the
- // channels and shutdown the nodes created throughout this test.
- ht.CloseChannel(alice, chanPointAlice)
- ht.CloseChannel(bob, chanPointBob)
- ht.CloseChannel(carol, chanPointCarol)
}
// testQueryRoutes checks the response of queryroutes.
@@ -1048,7 +1003,9 @@ func testQueryRoutes(ht *lntest.HarnessTest) {
const chanAmt = btcutil.Amount(100000)
// Grab Alice and Bob from the standby nodes.
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
+ ht.EnsureConnected(alice, bob)
// Create Carol and connect her to Bob. We also send her some coins for
// channel opening.
@@ -1071,7 +1028,6 @@ func testQueryRoutes(ht *lntest.HarnessTest) {
resp := ht.OpenMultiChannelsAsync(reqs)
// Extract channel points from the response.
- chanPointAlice := resp[0]
chanPointBob := resp[1]
chanPointCarol := resp[2]
@@ -1182,12 +1138,6 @@ func testQueryRoutes(ht *lntest.HarnessTest) {
// control import function updates appropriately.
testMissionControlCfg(ht.T, alice)
testMissionControlImport(ht, alice, bob.PubKey[:], carol.PubKey[:])
-
- // We clean up the test case by closing channels that were created for
- // the duration of the tests.
- ht.CloseChannel(alice, chanPointAlice)
- ht.CloseChannel(bob, chanPointBob)
- ht.CloseChannel(carol, chanPointCarol)
}
// testMissionControlCfg tests getting and setting of a node's mission control
@@ -1351,7 +1301,10 @@ func testRouteFeeCutoff(ht *lntest.HarnessTest) {
const chanAmt = btcutil.Amount(100000)
// Open a channel between Alice and Bob.
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("Bob", nil)
+ ht.EnsureConnected(alice, bob)
+
chanPointAliceBob := ht.OpenChannel(
alice, bob, lntest.OpenChannelParams{Amt: chanAmt},
)
@@ -1511,13 +1464,6 @@ func testRouteFeeCutoff(ht *lntest.HarnessTest) {
},
}
testFeeCutoff(feeLimitFixed)
-
- // Once we're done, close the channels and shut down the nodes created
- // throughout this test.
- ht.CloseChannel(alice, chanPointAliceBob)
- ht.CloseChannel(alice, chanPointAliceCarol)
- ht.CloseChannel(bob, chanPointBobDave)
- ht.CloseChannel(carol, chanPointCarolDave)
}
// testFeeLimitAfterQueryRoutes tests that a payment's fee limit is consistent
@@ -1530,7 +1476,7 @@ func testFeeLimitAfterQueryRoutes(ht *lntest.HarnessTest) {
cfgs, lntest.OpenChannelParams{Amt: chanAmt},
)
alice, bob, carol := nodes[0], nodes[1], nodes[2]
- chanPointAliceBob, chanPointBobCarol := chanPoints[0], chanPoints[1]
+ chanPointAliceBob := chanPoints[0]
// We set an inbound fee discount on Bob's channel to Alice to
// effectively set the outbound fees charged to Carol to zero.
@@ -1589,10 +1535,6 @@ func testFeeLimitAfterQueryRoutes(ht *lntest.HarnessTest) {
// We assert that a route compatible with the fee limit is available.
ht.SendPaymentAssertSettled(alice, sendReq)
-
- // Once we're done, close the channels.
- ht.CloseChannel(alice, chanPointAliceBob)
- ht.CloseChannel(bob, chanPointBobCarol)
}
// computeFee calculates the payment fee as specified in BOLT07.
diff --git a/itest/lnd_send_multi_path_payment_test.go b/itest/lnd_send_multi_path_payment_test.go
deleted file mode 100644
index 935997c8d2..0000000000
--- a/itest/lnd_send_multi_path_payment_test.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package itest
-
-import (
- "encoding/hex"
-
- "github.com/btcsuite/btcd/btcutil"
- "github.com/lightningnetwork/lnd/lnrpc"
- "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
- "github.com/lightningnetwork/lnd/lntest"
- "github.com/stretchr/testify/require"
-)
-
-// testSendMultiPathPayment tests that we are able to successfully route a
-// payment using multiple shards across different paths.
-func testSendMultiPathPayment(ht *lntest.HarnessTest) {
- mts := newMppTestScenario(ht)
-
- const paymentAmt = btcutil.Amount(300000)
-
- // Set up a network with three different paths Alice <-> Bob. Channel
- // capacities are set such that the payment can only succeed if (at
- // least) three paths are used.
- //
- // _ Eve _
- // / \
- // Alice -- Carol ---- Bob
- // \ /
- // \__ Dave ____/
- //
- req := &mppOpenChannelRequest{
- amtAliceCarol: 285000,
- amtAliceDave: 155000,
- amtCarolBob: 200000,
- amtCarolEve: 155000,
- amtDaveBob: 155000,
- amtEveBob: 155000,
- }
- mts.openChannels(req)
- chanPointAliceDave := mts.channelPoints[1]
-
- // Increase Dave's fee to make the test deterministic. Otherwise, it
- // would be unpredictable whether pathfinding would go through Charlie
- // or Dave for the first shard.
- expectedPolicy := &lnrpc.RoutingPolicy{
- FeeBaseMsat: 500_000,
- FeeRateMilliMsat: int64(0.001 * 1_000_000),
- TimeLockDelta: 40,
- MinHtlc: 1000, // default value
- MaxHtlcMsat: 133_650_000,
- }
- mts.dave.UpdateGlobalPolicy(expectedPolicy)
-
- // Make sure Alice has heard it.
- ht.AssertChannelPolicyUpdate(
- mts.alice, mts.dave, expectedPolicy, chanPointAliceDave, false,
- )
-
- // Our first test will be Alice paying Bob using a SendPayment call.
- // Let Bob create an invoice for Alice to pay.
- payReqs, rHashes, invoices := ht.CreatePayReqs(mts.bob, paymentAmt, 1)
-
- rHash := rHashes[0]
- payReq := payReqs[0]
-
- sendReq := &routerrpc.SendPaymentRequest{
- PaymentRequest: payReq,
- MaxParts: 10,
- TimeoutSeconds: 60,
- FeeLimitMsat: noFeeLimitMsat,
- }
- payment := ht.SendPaymentAssertSettled(mts.alice, sendReq)
-
- // Make sure we got the preimage.
- require.Equal(ht, hex.EncodeToString(invoices[0].RPreimage),
- payment.PaymentPreimage, "preimage doesn't match")
-
- // Check that Alice split the payment in at least three shards. Because
- // the hand-off of the htlc to the link is asynchronous (via a mailbox),
- // there is some non-determinism in the process. Depending on whether
- // the new pathfinding round is started before or after the htlc is
- // locked into the channel, different sharding may occur. Therefore we
- // can only check if the number of shards isn't below the theoretical
- // minimum.
- succeeded := 0
- for _, htlc := range payment.Htlcs {
- if htlc.Status == lnrpc.HTLCAttempt_SUCCEEDED {
- succeeded++
- }
- }
-
- const minExpectedShards = 3
- require.GreaterOrEqual(ht, succeeded, minExpectedShards,
- "expected shards not reached")
-
- // Make sure Bob show the invoice as settled for the full amount.
- inv := mts.bob.RPC.LookupInvoice(rHash)
-
- require.EqualValues(ht, paymentAmt, inv.AmtPaidSat,
- "incorrect payment amt")
-
- require.Equal(ht, lnrpc.Invoice_SETTLED, inv.State,
- "Invoice not settled")
-
- settled := 0
- for _, htlc := range inv.Htlcs {
- if htlc.State == lnrpc.InvoiceHTLCState_SETTLED {
- settled++
- }
- }
- require.Equal(ht, succeeded, settled,
- "num of HTLCs wrong")
-
- // Finally, close all channels.
- mts.closeChannels()
-}
diff --git a/itest/lnd_signer_test.go b/itest/lnd_signer_test.go
index 9310699887..ada7534089 100644
--- a/itest/lnd_signer_test.go
+++ b/itest/lnd_signer_test.go
@@ -25,7 +25,9 @@ import (
// the node's pubkey and a customized public key to check the validity of the
// result.
func testDeriveSharedKey(ht *lntest.HarnessTest) {
- runDeriveSharedKey(ht, ht.Alice)
+ alice := ht.NewNode("Alice", nil)
+
+ runDeriveSharedKey(ht, alice)
}
// runDeriveSharedKey checks the ECDH performed by the endpoint
@@ -197,7 +199,9 @@ func runDeriveSharedKey(ht *lntest.HarnessTest, alice *node.HarnessNode) {
// testSignOutputRaw makes sure that the SignOutputRaw RPC can be used with all
// custom ways of specifying the signing key in the key descriptor/locator.
func testSignOutputRaw(ht *lntest.HarnessTest) {
- runSignOutputRaw(ht, ht.Alice)
+ alice := ht.NewNodeWithCoins("Alice", nil)
+
+ runSignOutputRaw(ht, alice)
}
// runSignOutputRaw makes sure that the SignOutputRaw RPC can be used with all
@@ -377,7 +381,9 @@ func assertSignOutputRaw(ht *lntest.HarnessTest,
// all custom flags by verifying with VerifyMessage. Tests both ECDSA and
// Schnorr signatures.
func testSignVerifyMessage(ht *lntest.HarnessTest) {
- runSignVerifyMessage(ht, ht.Alice)
+ alice := ht.NewNode("Alice", nil)
+
+ runSignVerifyMessage(ht, alice)
}
// runSignVerifyMessage makes sure that the SignMessage RPC can be used with
diff --git a/itest/lnd_single_hop_invoice_test.go b/itest/lnd_single_hop_invoice_test.go
index 8051f7bb86..954f8666ef 100644
--- a/itest/lnd_single_hop_invoice_test.go
+++ b/itest/lnd_single_hop_invoice_test.go
@@ -18,10 +18,11 @@ func testSingleHopInvoice(ht *lntest.HarnessTest) {
// Open a channel with 100k satoshis between Alice and Bob with Alice
// being the sole funder of the channel.
chanAmt := btcutil.Amount(100000)
- alice, bob := ht.Alice, ht.Bob
- cp := ht.OpenChannel(
- alice, bob, lntest.OpenChannelParams{Amt: chanAmt},
+ chanPoints, nodes := ht.CreateSimpleNetwork(
+ [][]string{nil, nil}, lntest.OpenChannelParams{Amt: chanAmt},
)
+ cp := chanPoints[0]
+ alice, bob := nodes[0], nodes[1]
// assertAmountPaid is a helper closure that asserts the amount paid by
// Alice and received by Bob are expected.
@@ -136,6 +137,4 @@ func testSingleHopInvoice(ht *lntest.HarnessTest) {
require.EqualValues(ht, 1, hopHint.FeeBaseMsat, "wrong FeeBaseMsat")
require.EqualValues(ht, 20, hopHint.CltvExpiryDelta,
"wrong CltvExpiryDelta")
-
- ht.CloseChannel(alice, cp)
}
diff --git a/itest/lnd_sweep_test.go b/itest/lnd_sweep_test.go
index 17e0910b63..5d76557972 100644
--- a/itest/lnd_sweep_test.go
+++ b/itest/lnd_sweep_test.go
@@ -2,7 +2,6 @@ package itest
import (
"fmt"
- "math"
"time"
"github.com/btcsuite/btcd/btcutil"
@@ -61,10 +60,7 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) {
// Set up the fee estimator to return the testing fee rate when the
// conf target is the deadline.
- //
- // TODO(yy): switch to conf when `blockbeat` is in place.
- // ht.SetFeeEstimateWithConf(startFeeRateAnchor, deadlineDeltaAnchor)
- ht.SetFeeEstimate(startFeeRateAnchor)
+ ht.SetFeeEstimateWithConf(startFeeRateAnchor, deadlineDeltaAnchor)
// htlcValue is the outgoing HTLC's value.
htlcValue := invoiceAmt
@@ -117,6 +113,14 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) {
ht.FundCoins(btcutil.SatoshiPerBitcoin, bob)
}
+ // Bob should have enough wallet UTXOs here to sweep the HTLC in the
+ // end of this test. However, due to a known issue, Bob's wallet may
+ // report there's no UTXO available. For details,
+ // - https://github.com/lightningnetwork/lnd/issues/8786
+ //
+ // TODO(yy): remove this step once the issue is resolved.
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, bob)
+
// Subscribe the invoice.
streamCarol := carol.RPC.SubscribeSingleInvoice(payHash[:])
@@ -171,52 +175,36 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) {
))
ht.MineEmptyBlocks(int(numBlocks))
- // Assert Bob's force closing tx has been broadcast.
- closeTxid := ht.AssertNumTxsInMempool(1)[0]
+ // Assert Bob's force closing tx has been broadcast. We should see two
+ // txns in the mempool:
+ // 1. Bob's force closing tx.
+ // 2. Bob's anchor sweeping tx CPFPing the force close tx.
+ _, sweepTx := ht.AssertForceCloseAndAnchorTxnsInMempool()
// Remember the force close height so we can calculate the deadline
// height.
forceCloseHeight := ht.CurrentHeight()
- // Bob should have two pending sweeps,
- // - anchor sweeping from his local commitment.
- // - anchor sweeping from his remote commitment (invalid).
- //
- // TODO(yy): consider only sweeping the anchor from the local
- // commitment. Previously we would sweep up to three versions of
- // anchors because we don't know which one will be confirmed - if we
- // only broadcast the local anchor sweeping, our peer can broadcast
- // their commitment tx and replaces ours. With the new fee bumping, we
- // should be safe to only sweep our local anchor since we RBF it on
- // every new block, which destroys the remote's ability to pin us.
- sweeps := ht.AssertNumPendingSweeps(bob, 2)
-
- // The two anchor sweeping should have the same deadline height.
- deadlineHeight := forceCloseHeight + deadlineDeltaAnchor
- require.Equal(ht, deadlineHeight, sweeps[0].DeadlineHeight)
- require.Equal(ht, deadlineHeight, sweeps[1].DeadlineHeight)
+ var anchorSweep *walletrpc.PendingSweep
- // Remember the deadline height for the CPFP anchor.
- anchorDeadline := sweeps[0].DeadlineHeight
+ // Bob should have one pending sweep,
+ // - anchor sweeping from his local commitment.
+ expectedNumSweeps := 1
- // Mine a block so Bob's force closing tx stays in the mempool, which
- // also triggers the CPFP anchor sweep.
- ht.MineEmptyBlocks(1)
+ // For neutrino backend, Bob would have two anchor sweeps - one from
+ // the local and the other from the remote.
+ if ht.IsNeutrinoBackend() {
+ expectedNumSweeps = 2
+ }
- // Bob should still have two pending sweeps,
- // - anchor sweeping from his local commitment.
- // - anchor sweeping from his remote commitment (invalid).
- ht.AssertNumPendingSweeps(bob, 2)
+ anchorSweep = ht.AssertNumPendingSweeps(bob, expectedNumSweeps)[0]
- // We now check the expected fee and fee rate are used for Bob's anchor
- // sweeping tx.
- //
- // We should see Bob's anchor sweeping tx triggered by the above
- // block, along with his force close tx.
- txns := ht.GetNumTxsFromMempool(2)
+ // The anchor sweeping should have the expected deadline height.
+ deadlineHeight := forceCloseHeight + deadlineDeltaAnchor
+ require.Equal(ht, deadlineHeight, anchorSweep.DeadlineHeight)
- // Find the sweeping tx.
- sweepTx := ht.FindSweepingTxns(txns, 1, closeTxid)[0]
+ // Remember the deadline height for the CPFP anchor.
+ anchorDeadline := anchorSweep.DeadlineHeight
// Get the weight for Bob's anchor sweeping tx.
txWeight := ht.CalculateTxWeight(sweepTx)
@@ -228,11 +216,10 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) {
fee := uint64(ht.CalculateTxFee(sweepTx))
feeRate := uint64(ht.CalculateTxFeeRate(sweepTx))
- // feeFuncWidth is the width of the fee function. By the time we got
- // here, we've already mined one block, and the fee function maxes
- // out one block before the deadline, so the width is the original
- // deadline minus 2.
- feeFuncWidth := deadlineDeltaAnchor - 2
+ // feeFuncWidth is the width of the fee function. The fee function
+ // maxes out one block before the deadline, so the width is the
+ // original deadline minus 1.
+ feeFuncWidth := deadlineDeltaAnchor - 1
// Calculate the expected delta increased per block.
feeDelta := (cpfpBudget - startFeeAnchor).MulF64(
@@ -258,20 +245,27 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) {
// Bob's fee bumper should increase its fees.
ht.MineEmptyBlocks(1)
- // Bob should still have two pending sweeps,
- // - anchor sweeping from his local commitment.
- // - anchor sweeping from his remote commitment (invalid).
- ht.AssertNumPendingSweeps(bob, 2)
-
- // Make sure Bob's old sweeping tx has been removed from the
- // mempool.
- ht.AssertTxNotInMempool(sweepTx.TxHash())
+ // Bob should still have the anchor sweeping from his local
+ // commitment. His anchor sweeping from his remote commitment
+ // is invalid and should be removed.
+ ht.AssertNumPendingSweeps(bob, expectedNumSweeps)
// We expect to see two txns in the mempool,
// - Bob's force close tx.
// - Bob's anchor sweep tx.
ht.AssertNumTxsInMempool(2)
+ // Make sure Bob's old sweeping tx has been removed from the
+ // mempool.
+ ht.AssertTxNotInMempool(sweepTx.TxHash())
+
+ // Assert the two txns are still in the mempool and grab the
+ // sweeping tx.
+ //
+ // NOTE: must call it again after `AssertTxNotInMempool` to
+ // make sure we get the replaced tx.
+ _, sweepTx = ht.AssertForceCloseAndAnchorTxnsInMempool()
+
// We expect the fees to increase by i*delta.
expectedFee := startFeeAnchor + feeDelta.MulF64(float64(i))
expectedFeeRate := chainfee.NewSatPerKWeight(
@@ -280,11 +274,6 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) {
// We should see Bob's anchor sweeping tx being fee bumped
// since it's not confirmed, along with his force close tx.
- txns = ht.GetNumTxsFromMempool(2)
-
- // Find the sweeping tx.
- sweepTx = ht.FindSweepingTxns(txns, 1, closeTxid)[0]
-
// Calculate the fee rate of Bob's new sweeping tx.
feeRate = uint64(ht.CalculateTxFeeRate(sweepTx))
@@ -292,9 +281,9 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) {
fee = uint64(ht.CalculateTxFee(sweepTx))
ht.Logf("Bob(position=%v): txWeight=%v, expected: [fee=%d, "+
- "feerate=%v], got: [fee=%v, feerate=%v]",
+ "feerate=%v], got: [fee=%v, feerate=%v] in tx %v",
feeFuncWidth-i, txWeight, expectedFee,
- expectedFeeRate, fee, feeRate)
+ expectedFeeRate, fee, feeRate, sweepTx.TxHash())
// Assert Bob's tx has the expected fee and fee rate.
require.InEpsilonf(ht, uint64(expectedFee), fee, 0.01,
@@ -314,22 +303,23 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) {
// Mine one more block, we'd use up all the CPFP budget.
ht.MineEmptyBlocks(1)
+ // We expect to see two txns in the mempool,
+ // - Bob's force close tx.
+ // - Bob's anchor sweep tx.
+ ht.AssertNumTxsInMempool(2)
+
// Make sure Bob's old sweeping tx has been removed from the mempool.
ht.AssertTxNotInMempool(sweepTx.TxHash())
// Get the last sweeping tx - we should see two txns here, Bob's anchor
// sweeping tx and his force close tx.
- txns = ht.GetNumTxsFromMempool(2)
-
- // Find the sweeping tx.
- sweepTx = ht.FindSweepingTxns(txns, 1, closeTxid)[0]
-
- // Calculate the fee of Bob's new sweeping tx.
- fee = uint64(ht.CalculateTxFee(sweepTx))
+ //
+ // NOTE: must call it again after `AssertTxNotInMempool` to make sure
+ // we get the replaced tx.
+ _, sweepTx = ht.AssertForceCloseAndAnchorTxnsInMempool()
- // Assert the budget is now used up.
- require.InEpsilonf(ht, uint64(cpfpBudget), fee, 0.01, "want %d, got %d",
- cpfpBudget, fee)
+ // Bob should have the anchor sweeping from his local commitment.
+ ht.AssertNumPendingSweeps(bob, expectedNumSweeps)
// Mine one more block. Since Bob's budget has been used up, there
// won't be any more sweeping attempts. We now assert this by checking
@@ -340,10 +330,7 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) {
//
// We expect two txns here, one for the anchor sweeping, the other for
// the force close tx.
- txns = ht.GetNumTxsFromMempool(2)
-
- // Find the sweeping tx.
- currentSweepTx := ht.FindSweepingTxns(txns, 1, closeTxid)[0]
+ _, currentSweepTx := ht.AssertForceCloseAndAnchorTxnsInMempool()
// Assert the anchor sweep tx stays unchanged.
require.Equal(ht, sweepTx.TxHash(), currentSweepTx.TxHash())
@@ -357,6 +344,7 @@ func testSweepCPFPAnchorOutgoingTimeout(ht *lntest.HarnessTest) {
// the HTLC sweeping behaviors so we just perform a simple check and
// exit the test.
ht.AssertNumPendingSweeps(bob, 1)
+ ht.MineBlocksAndAssertNumTxes(1, 1)
// Finally, clean the mempool for the next test.
ht.CleanShutDown()
@@ -404,10 +392,7 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) {
// Set up the fee estimator to return the testing fee rate when the
// conf target is the deadline.
- //
- // TODO(yy): switch to conf when `blockbeat` is in place.
- // ht.SetFeeEstimateWithConf(startFeeRateAnchor, deadlineDeltaAnchor)
- ht.SetFeeEstimate(startFeeRateAnchor)
+ ht.SetFeeEstimateWithConf(startFeeRateAnchor, deadlineDeltaAnchor)
// Create a preimage, that will be held by Carol.
var preimage lntypes.Preimage
@@ -447,6 +432,14 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) {
ht.FundCoins(btcutil.SatoshiPerBitcoin, bob)
}
+ // Bob should have enough wallet UTXOs here to sweep the HTLC in the
+ // end of this test. However, due to a known issue, Bob's wallet may
+ // report there's no UTXO available. For details,
+ // - https://github.com/lightningnetwork/lnd/issues/8786
+ //
+ // TODO(yy): remove this step once the issue is resolved.
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, bob)
+
// Subscribe the invoice.
streamCarol := carol.RPC.SubscribeSingleInvoice(payHash[:])
@@ -524,40 +517,30 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) {
numBlocks := forceCloseHeight - currentHeight
ht.MineEmptyBlocks(int(numBlocks))
- // Assert Bob's force closing tx has been broadcast.
- closeTxid := ht.AssertNumTxsInMempool(1)[0]
+ // Assert Bob's force closing tx has been broadcast. We should see two
+ // txns in the mempool:
+ // 1. Bob's force closing tx.
+ // 2. Bob's anchor sweeping tx CPFPing the force close tx.
+ _, sweepTx := ht.AssertForceCloseAndAnchorTxnsInMempool()
- // Bob should have two pending sweeps,
+ // Bob should have one pending sweep,
// - anchor sweeping from his local commitment.
- // - anchor sweeping from his remote commitment (invalid).
- sweeps := ht.AssertNumPendingSweeps(bob, 2)
-
- // The two anchor sweeping should have the same deadline height.
- deadlineHeight := forceCloseHeight + deadlineDeltaAnchor
- require.Equal(ht, deadlineHeight, sweeps[0].DeadlineHeight)
- require.Equal(ht, deadlineHeight, sweeps[1].DeadlineHeight)
-
- // Remember the deadline height for the CPFP anchor.
- anchorDeadline := sweeps[0].DeadlineHeight
+ expectedNumSweeps := 1
- // Mine a block so Bob's force closing tx stays in the mempool, which
- // also triggers the CPFP anchor sweep.
- ht.MineEmptyBlocks(1)
+ // For neutrino backend, Bob would have two anchor sweeps - one from
+ // the local and the other from the remote.
+ if ht.IsNeutrinoBackend() {
+ expectedNumSweeps = 2
+ }
- // Bob should still have two pending sweeps,
- // - anchor sweeping from his local commitment.
- // - anchor sweeping from his remote commitment (invalid).
- ht.AssertNumPendingSweeps(bob, 2)
+ anchorSweep := ht.AssertNumPendingSweeps(bob, expectedNumSweeps)[0]
- // We now check the expected fee and fee rate are used for Bob's anchor
- // sweeping tx.
- //
- // We should see Bob's anchor sweeping tx triggered by the above
- // block, along with his force close tx.
- txns := ht.GetNumTxsFromMempool(2)
+ // The anchor sweeping should have the expected deadline height.
+ deadlineHeight := forceCloseHeight + deadlineDeltaAnchor
+ require.Equal(ht, deadlineHeight, anchorSweep.DeadlineHeight)
- // Find the sweeping tx.
- sweepTx := ht.FindSweepingTxns(txns, 1, closeTxid)[0]
+ // Remember the deadline height for the CPFP anchor.
+ anchorDeadline := anchorSweep.DeadlineHeight
// Get the weight for Bob's anchor sweeping tx.
txWeight := ht.CalculateTxWeight(sweepTx)
@@ -569,11 +552,10 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) {
fee := uint64(ht.CalculateTxFee(sweepTx))
feeRate := uint64(ht.CalculateTxFeeRate(sweepTx))
- // feeFuncWidth is the width of the fee function. By the time we got
- // here, we've already mined one block, and the fee function maxes
- // out one block before the deadline, so the width is the original
- // deadline minus 2.
- feeFuncWidth := deadlineDeltaAnchor - 2
+ // feeFuncWidth is the width of the fee function. The fee function
+ // maxes out one block before the deadline, so the width is the
+ // original deadline minus 1.
+ feeFuncWidth := deadlineDeltaAnchor - 1
// Calculate the expected delta increased per block.
feeDelta := (cpfpBudget - startFeeAnchor).MulF64(
@@ -599,10 +581,15 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) {
// Bob's fee bumper should increase its fees.
ht.MineEmptyBlocks(1)
- // Bob should still have two pending sweeps,
- // - anchor sweeping from his local commitment.
- // - anchor sweeping from his remote commitment (invalid).
- ht.AssertNumPendingSweeps(bob, 2)
+ // Bob should still have the anchor sweeping from his local
+ // commitment. His anchor sweeping from his remote commitment
+ // is invalid and should be removed.
+ ht.AssertNumPendingSweeps(bob, expectedNumSweeps)
+
+ // We expect to see two txns in the mempool,
+ // - Bob's force close tx.
+ // - Bob's anchor sweep tx.
+ ht.AssertNumTxsInMempool(2)
// Make sure Bob's old sweeping tx has been removed from the
// mempool.
@@ -611,7 +598,7 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) {
// We expect to see two txns in the mempool,
// - Bob's force close tx.
// - Bob's anchor sweep tx.
- ht.AssertNumTxsInMempool(2)
+ _, sweepTx = ht.AssertForceCloseAndAnchorTxnsInMempool()
// We expect the fees to increase by i*delta.
expectedFee := startFeeAnchor + feeDelta.MulF64(float64(i))
@@ -619,13 +606,6 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) {
expectedFee, txWeight,
)
- // We should see Bob's anchor sweeping tx being fee bumped
- // since it's not confirmed, along with his force close tx.
- txns = ht.GetNumTxsFromMempool(2)
-
- // Find the sweeping tx.
- sweepTx = ht.FindSweepingTxns(txns, 1, closeTxid)[0]
-
// Calculate the fee rate of Bob's new sweeping tx.
feeRate = uint64(ht.CalculateTxFeeRate(sweepTx))
@@ -633,9 +613,9 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) {
fee = uint64(ht.CalculateTxFee(sweepTx))
ht.Logf("Bob(position=%v): txWeight=%v, expected: [fee=%d, "+
- "feerate=%v], got: [fee=%v, feerate=%v]",
+ "feerate=%v], got: [fee=%v, feerate=%v] in tx %v",
feeFuncWidth-i, txWeight, expectedFee,
- expectedFeeRate, fee, feeRate)
+ expectedFeeRate, fee, feeRate, sweepTx.TxHash())
// Assert Bob's tx has the expected fee and fee rate.
require.InEpsilonf(ht, uint64(expectedFee), fee, 0.01,
@@ -655,15 +635,17 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) {
// Mine one more block, we'd use up all the CPFP budget.
ht.MineEmptyBlocks(1)
+ // We expect to see two txns in the mempool,
+ // - Bob's force close tx.
+ // - Bob's anchor sweep tx.
+ ht.AssertNumTxsInMempool(2)
+
// Make sure Bob's old sweeping tx has been removed from the mempool.
ht.AssertTxNotInMempool(sweepTx.TxHash())
// Get the last sweeping tx - we should see two txns here, Bob's anchor
// sweeping tx and his force close tx.
- txns = ht.GetNumTxsFromMempool(2)
-
- // Find the sweeping tx.
- sweepTx = ht.FindSweepingTxns(txns, 1, closeTxid)[0]
+ _, sweepTx = ht.AssertForceCloseAndAnchorTxnsInMempool()
// Calculate the fee of Bob's new sweeping tx.
fee = uint64(ht.CalculateTxFee(sweepTx))
@@ -681,10 +663,7 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) {
//
// We expect two txns here, one for the anchor sweeping, the other for
// the force close tx.
- txns = ht.GetNumTxsFromMempool(2)
-
- // Find the sweeping tx.
- currentSweepTx := ht.FindSweepingTxns(txns, 1, closeTxid)[0]
+ _, currentSweepTx := ht.AssertForceCloseAndAnchorTxnsInMempool()
// Assert the anchor sweep tx stays unchanged.
require.Equal(ht, sweepTx.TxHash(), currentSweepTx.TxHash())
@@ -698,6 +677,7 @@ func testSweepCPFPAnchorIncomingTimeout(ht *lntest.HarnessTest) {
// the HTLC sweeping behaviors so we just perform a simple check and
// exit the test.
ht.AssertNumPendingSweeps(bob, 1)
+ ht.MineBlocksAndAssertNumTxes(1, 1)
// Finally, clean the mempool for the next test.
ht.CleanShutDown()
@@ -735,9 +715,9 @@ func testSweepHTLCs(ht *lntest.HarnessTest) {
cltvDelta := routing.MinCLTVDelta
// Start tracking the deadline delta of Bob's HTLCs. We need one block
- // for the CSV lock, and another block to trigger the sweeper to sweep.
- outgoingHTLCDeadline := int32(cltvDelta - 2)
- incomingHTLCDeadline := int32(lncfg.DefaultIncomingBroadcastDelta - 2)
+ // to trigger the sweeper to sweep.
+ outgoingHTLCDeadline := int32(cltvDelta - 1)
+ incomingHTLCDeadline := int32(lncfg.DefaultIncomingBroadcastDelta - 1)
// startFeeRate1 and startFeeRate2 are returned by the fee estimator in
// sat/kw. They will be used as the starting fee rate for the linear
@@ -794,6 +774,14 @@ func testSweepHTLCs(ht *lntest.HarnessTest) {
ht.FundCoins(btcutil.SatoshiPerBitcoin, bob)
ht.FundCoins(btcutil.SatoshiPerBitcoin, bob)
+ // Bob should have enough wallet UTXOs here to sweep the HTLC in the
+ // end of this test. However, due to a known issue, Bob's wallet may
+ // report there's no UTXO available. For details,
+ // - https://github.com/lightningnetwork/lnd/issues/8786
+ //
+ // TODO(yy): remove this step once the issue is resolved.
+ ht.FundCoins(btcutil.SatoshiPerBitcoin, bob)
+
// For neutrino backend, we need two more UTXOs for Bob to create his
// sweeping txns.
if ht.IsNeutrinoBackend() {
@@ -894,34 +882,35 @@ func testSweepHTLCs(ht *lntest.HarnessTest) {
// Bob should now have two pending sweeps, one for the anchor on the
// local commitment, the other on the remote commitment.
- ht.AssertNumPendingSweeps(bob, 2)
+ expectedNumSweeps := 1
- // Assert Bob's force closing tx has been broadcast.
- ht.AssertNumTxsInMempool(1)
+ // For neutrino backend, we expect the anchor output from his remote
+ // commitment to be present.
+ if ht.IsNeutrinoBackend() {
+ expectedNumSweeps = 2
+ }
+
+ ht.AssertNumPendingSweeps(bob, expectedNumSweeps)
+
+ // We expect to see two txns in the mempool:
+ // 1. Bob's force closing tx.
+ // 2. Bob's anchor CPFP sweeping tx.
+ ht.AssertNumTxsInMempool(2)
- // Mine the force close tx, which triggers Bob's contractcourt to offer
- // his outgoing HTLC to his sweeper.
+ // Mine the force close tx and CPFP sweeping tx, which triggers Bob's
+ // contractcourt to offer his outgoing HTLC to his sweeper.
//
// NOTE: HTLC outputs are only offered to sweeper when the force close
// tx is confirmed and the CSV has reached.
- ht.MineBlocksAndAssertNumTxes(1, 1)
-
- // Update the blocks left till Bob force closes Alice->Bob.
- blocksTillIncomingSweep--
-
- // Bob should have two pending sweeps, one for the anchor sweeping, the
- // other for the outgoing HTLC.
- ht.AssertNumPendingSweeps(bob, 2)
-
- // Mine one block to confirm Bob's anchor sweeping tx, which will
- // trigger his sweeper to publish the HTLC sweeping tx.
- ht.MineBlocksAndAssertNumTxes(1, 1)
+ ht.MineBlocksAndAssertNumTxes(1, 2)
// Update the blocks left till Bob force closes Alice->Bob.
blocksTillIncomingSweep--
- // Bob should now have one sweep and one sweeping tx in the mempool.
+ // Bob should have one pending sweep for the outgoing HTLC.
ht.AssertNumPendingSweeps(bob, 1)
+
+ // Bob should have one sweeping tx in the mempool.
outgoingSweep := ht.GetNumTxsFromMempool(1)[0]
// Check the shape of the sweeping tx - we expect it to be
@@ -945,8 +934,8 @@ func testSweepHTLCs(ht *lntest.HarnessTest) {
// Assert the initial sweeping tx is using the start fee rate.
outgoingStartFeeRate := ht.CalculateTxFeeRate(outgoingSweep)
require.InEpsilonf(ht, uint64(startFeeRate1),
- uint64(outgoingStartFeeRate), 0.01, "want %d, got %d",
- startFeeRate1, outgoingStartFeeRate)
+ uint64(outgoingStartFeeRate), 0.01, "want %d, got %d in tx=%v",
+ startFeeRate1, outgoingStartFeeRate, outgoingSweep.TxHash())
// Now the start fee rate is checked, we can calculate the fee rate
// delta.
@@ -971,13 +960,12 @@ func testSweepHTLCs(ht *lntest.HarnessTest) {
)
ht.Logf("Bob's %s HTLC (deadline=%v): txWeight=%v, want "+
- "feerate=%v, got feerate=%v, delta=%v", desc,
+ "feerate=%v, got feerate=%v, delta=%v in tx %v", desc,
deadline-position, txSize, expectedFeeRate,
- feeRate, delta)
+ feeRate, delta, sweepTx.TxHash())
require.InEpsilonf(ht, uint64(expectedFeeRate), uint64(feeRate),
- 0.01, "want %v, got %v in tx=%v", expectedFeeRate,
- feeRate, sweepTx.TxHash())
+ 0.01, "want %v, got %v", expectedFeeRate, feeRate)
}
// We now mine enough blocks to trigger Bob to force close channel
@@ -1019,22 +1007,33 @@ func testSweepHTLCs(ht *lntest.HarnessTest) {
// Update Bob's fee function position.
outgoingFuncPosition++
- // Bob should now have three pending sweeps:
+ // Bob should now have two pending sweeps:
// 1. the outgoing HTLC output.
// 2. the anchor output from his local commitment.
- // 3. the anchor output from his remote commitment.
- ht.AssertNumPendingSweeps(bob, 3)
+ expectedNumSweeps = 2
- // We should see two txns in the mempool:
+ // For neutrino backend, we expect the anchor output from his remote
+ // commitment to be present.
+ if ht.IsNeutrinoBackend() {
+ expectedNumSweeps = 3
+ }
+
+ ht.AssertNumPendingSweeps(bob, expectedNumSweeps)
+
+ // We should see three txns in the mempool:
// 1. Bob's outgoing HTLC sweeping tx.
// 2. Bob's force close tx for Alice->Bob.
- txns := ht.GetNumTxsFromMempool(2)
+ // 3. Bob's anchor CPFP sweeping tx for Alice->Bob.
+ txns := ht.GetNumTxsFromMempool(3)
// Find the force close tx - we expect it to have a single input.
closeTx := txns[0]
if len(closeTx.TxIn) != 1 {
closeTx = txns[1]
}
+ if len(closeTx.TxIn) != 1 {
+ closeTx = txns[2]
+ }
// We don't care the behavior of the anchor sweep in this test, so we
// mine the force close tx to trigger Bob's contractcourt to offer his
@@ -1050,13 +1049,6 @@ func testSweepHTLCs(ht *lntest.HarnessTest) {
// 3. the anchor sweeping on Alice-> Bob.
ht.AssertNumPendingSweeps(bob, 3)
- // Mine one block, which will trigger his sweeper to publish his
- // incoming HTLC sweeping tx.
- ht.MineEmptyBlocks(1)
-
- // Update the fee function's positions.
- outgoingFuncPosition++
-
// We should see three txns in the mempool:
// 1. the outgoing HTLC sweeping tx.
// 2. the incoming HTLC sweeping tx.
@@ -1224,8 +1216,9 @@ func testSweepHTLCs(ht *lntest.HarnessTest) {
// Test:
// 1. Alice's anchor sweeping is not attempted, instead, it should be swept
// together with her to_local output using the no deadline path.
-// 2. Bob would also sweep his anchor and to_local outputs in a single
-// sweeping tx using the no deadline path.
+// 2. Bob would also sweep his anchor and to_local outputs separately due to
+// they have different deadline heights, which means only the to_local
+// sweeping tx will succeed as the anchor sweeping is not economical.
// 3. Both Alice and Bob's RBF attempts are using the fee rates calculated
// from the deadline and budget.
// 4. Wallet UTXOs requirements are met - neither Alice nor Bob needs wallet
@@ -1238,10 +1231,19 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) {
// config.
deadline := uint32(1000)
- // The actual deadline used by the fee function will be one block off
- // from the deadline configured as we require one block to be mined to
- // trigger the sweep.
- deadlineA, deadlineB := deadline-1, deadline-1
+ // For Alice, since her commit output is offered to the sweeper at
+ // CSV-1. With a deadline of 1000, her actual width of her fee func is
+ // CSV+1000-1.
+ deadlineA := deadline + 1
+
+ // For Bob, the actual deadline used by the fee function will be one
+ // block off from the deadline configured as we require one block to be
+ // mined to trigger the sweep. In addition, when sweeping his to_local
+ // output from Alice's commit tx, because of CSV of 2, the starting
+ // height will be "force_close_height+2", which means when the sweep
+ // request is received by the sweeper, the actual deadline delta is
+ // "deadline+1".
+ deadlineB := deadline + 1
// startFeeRate is returned by the fee estimator in sat/kw. This
// will be used as the starting fee rate for the linear fee func used
@@ -1252,7 +1254,7 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) {
// Set up the fee estimator to return the testing fee rate when the
// conf target is the deadline.
- ht.SetFeeEstimateWithConf(startFeeRate, deadlineA)
+ ht.SetFeeEstimateWithConf(startFeeRate, deadlineB)
// toLocalCSV is the CSV delay for Alice's to_local output. We use a
// small value to save us from mining blocks.
@@ -1260,25 +1262,7 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) {
// NOTE: once the force close tx is confirmed, we expect anchor
// sweeping starts. Then two more block later the commit output
// sweeping starts.
- //
- // NOTE: The CSV value is chosen to be 3 instead of 2, to reduce the
- // possibility of flakes as there is a race between the two goroutines:
- // G1 - Alice's sweeper receives the commit output.
- // G2 - Alice's sweeper receives the new block mined.
- // G1 is triggered by the same block being received by Alice's
- // contractcourt, deciding the commit output is mature and offering it
- // to her sweeper. Normally, we'd expect G2 to be finished before G1
- // because it's the same block processed by both contractcourt and
- // sweeper. However, if G2 is delayed (maybe the sweeper is slow in
- // finishing its previous round), G1 may finish before G2. This will
- // cause the sweeper to add the commit output to its pending inputs,
- // and once G2 fires, it will then start sweeping this output,
- // resulting a valid sweep tx being created using her commit and anchor
- // outputs.
- //
- // TODO(yy): fix the above issue by making sure subsystems share the
- // same view on current block height.
- toLocalCSV := 3
+ toLocalCSV := 2
// htlcAmt is the amount of the HTLC in sats, this should be Alice's
// to_remote amount that goes to Bob.
@@ -1377,155 +1361,41 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) {
// - commit sweeping from the to_remote on Alice's commit tx.
ht.AssertNumPendingSweeps(bob, 2)
+ // Bob's sweeper should have broadcast the commit output sweeping tx.
+ // At the block which mined the force close tx, Bob's `chainWatcher`
+ // will process the blockbeat first, which sends a signal to his
+ // `ChainArbitrator` to launch the resolvers. Once launched, the sweep
+ // requests will be sent to the sweeper. Finally, when the sweeper
+ // receives this blockbeat, it will create the sweeping tx and publish
+ // it.
+ ht.AssertNumTxsInMempool(1)
+
// Mine one more empty block should trigger Bob's sweeping. Since we
- // use a CSV of 3, this means Alice's to_local output is one block away
- // from being mature.
+ // use a CSV of 2, this means Alice's to_local output is now mature.
ht.MineEmptyBlocks(1)
- // We expect to see one sweeping tx in the mempool:
- // - Alice's anchor sweeping tx must have been failed due to the fee
- // rate chosen in this test - the anchor sweep tx has no output.
- // - Bob's sweeping tx, which sweeps both his anchor and commit outputs.
- bobSweepTx := ht.GetNumTxsFromMempool(1)[0]
-
// We expect two pending sweeps for Bob - anchor and commit outputs.
- pendingSweepBob := ht.AssertNumPendingSweeps(bob, 2)[0]
-
- // The sweeper may be one block behind contractcourt, so we double
- // check the actual deadline.
- //
- // TODO(yy): assert they are equal once blocks are synced via
- // `blockbeat`.
- currentHeight := int32(ht.CurrentHeight())
- actualDeadline := int32(pendingSweepBob.DeadlineHeight) - currentHeight
- if actualDeadline != int32(deadlineB) {
- ht.Logf("!!! Found unsynced block between sweeper and "+
- "contractcourt, expected deadline=%v, got=%v",
- deadlineB, actualDeadline)
-
- deadlineB = uint32(actualDeadline)
- }
-
- // Alice should still have one pending sweep - the anchor output.
- ht.AssertNumPendingSweeps(alice, 1)
-
- // We now check Bob's sweeping tx.
- //
- // Bob's sweeping tx should have 2 inputs, one from his commit output,
- // the other from his anchor output.
- require.Len(ht, bobSweepTx.TxIn, 2)
-
- // Because Bob is sweeping without deadline pressure, the starting fee
- // rate should be the min relay fee rate.
- bobStartFeeRate := ht.CalculateTxFeeRate(bobSweepTx)
- require.InEpsilonf(ht, uint64(chainfee.FeePerKwFloor),
- uint64(bobStartFeeRate), 0.01, "want %v, got %v",
- chainfee.FeePerKwFloor, bobStartFeeRate)
-
- // With Bob's starting fee rate being validated, we now calculate his
- // ending fee rate and fee rate delta.
- //
- // Bob sweeps two inputs - anchor and commit, so the starting budget
- // should come from the sum of these two.
- bobValue := btcutil.Amount(bobToLocal + 330)
- bobBudget := bobValue.MulF64(contractcourt.DefaultBudgetRatio)
-
- // Calculate the ending fee rate and fee rate delta used in his fee
- // function.
- bobTxWeight := ht.CalculateTxWeight(bobSweepTx)
- bobEndingFeeRate := chainfee.NewSatPerKWeight(bobBudget, bobTxWeight)
- bobFeeRateDelta := (bobEndingFeeRate - bobStartFeeRate) /
- chainfee.SatPerKWeight(deadlineB-1)
-
- // Mine an empty block, which should trigger Alice's contractcourt to
- // offer her commit output to the sweeper.
- ht.MineEmptyBlocks(1)
-
- // Alice should have both anchor and commit as the pending sweep
- // requests.
- aliceSweeps := ht.AssertNumPendingSweeps(alice, 2)
- aliceAnchor, aliceCommit := aliceSweeps[0], aliceSweeps[1]
- if aliceAnchor.AmountSat > aliceCommit.AmountSat {
- aliceAnchor, aliceCommit = aliceCommit, aliceAnchor
- }
-
- // The sweeper may be one block behind contractcourt, so we double
- // check the actual deadline.
- //
- // TODO(yy): assert they are equal once blocks are synced via
- // `blockbeat`.
- currentHeight = int32(ht.CurrentHeight())
- actualDeadline = int32(aliceCommit.DeadlineHeight) - currentHeight
- if actualDeadline != int32(deadlineA) {
- ht.Logf("!!! Found unsynced block between Alice's sweeper and "+
- "contractcourt, expected deadline=%v, got=%v",
- deadlineA, actualDeadline)
-
- deadlineA = uint32(actualDeadline)
- }
-
- // We now wait for 30 seconds to overcome the flake - there's a block
- // race between contractcourt and sweeper, causing the sweep to be
- // broadcast earlier.
- //
- // TODO(yy): remove this once `blockbeat` is in place.
- aliceStartPosition := 0
- var aliceFirstSweepTx *wire.MsgTx
- err := wait.NoError(func() error {
- mem := ht.GetRawMempool()
- if len(mem) != 2 {
- return fmt.Errorf("want 2, got %v in mempool: %v",
- len(mem), mem)
- }
-
- // If there are two txns, it means Alice's sweep tx has been
- // created and published.
- aliceStartPosition = 1
-
- txns := ht.GetNumTxsFromMempool(2)
- aliceFirstSweepTx = txns[0]
-
- // Reassign if the second tx is larger.
- if txns[1].TxOut[0].Value > aliceFirstSweepTx.TxOut[0].Value {
- aliceFirstSweepTx = txns[1]
- }
-
- return nil
- }, wait.DefaultTimeout)
- ht.Logf("Checking mempool got: %v", err)
-
- // Mine an empty block, which should trigger Alice's sweeper to publish
- // her commit sweep along with her anchor output.
- ht.MineEmptyBlocks(1)
+ ht.AssertNumPendingSweeps(bob, 2)
- // If Alice has already published her initial sweep tx, the above mined
- // block would trigger an RBF. We now need to assert the mempool has
- // removed the replaced tx.
- if aliceFirstSweepTx != nil {
- ht.AssertTxNotInMempool(aliceFirstSweepTx.TxHash())
- }
+ // We expect two pending sweeps for Alice - anchor and commit outputs.
+ ht.AssertNumPendingSweeps(alice, 2)
// We also remember the positions of fee functions used by Alice and
// Bob. They will be used to calculate the expected fee rates later.
- //
- // Alice's sweeping tx has just been created, so she is at the starting
- // position. For Bob, due to the above mined blocks, his fee function
- // is now at position 2.
- alicePosition, bobPosition := uint32(aliceStartPosition), uint32(2)
+ alicePosition, bobPosition := uint32(0), uint32(1)
// We should see two txns in the mempool:
// - Alice's sweeping tx, which sweeps her commit output at the
// starting fee rate - Alice's anchor output won't be swept with her
// commit output together because they have different deadlines.
- // - Bob's previous sweeping tx, which sweeps both his anchor and
- // commit outputs, at the starting fee rate.
+ // - Bob's previous sweeping tx, which sweeps his and commit outputs,
+ // at the starting fee rate.
txns := ht.GetNumTxsFromMempool(2)
// Assume the first tx is Alice's sweeping tx, if the second tx has a
// larger output value, then that's Alice's as her to_local value is
// much gearter.
- aliceSweepTx := txns[0]
- bobSweepTx = txns[1]
+ aliceSweepTx, bobSweepTx := txns[0], txns[1]
// Swap them if bobSweepTx is smaller.
if bobSweepTx.TxOut[0].Value > aliceSweepTx.TxOut[0].Value {
@@ -1539,20 +1409,6 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) {
require.Len(ht, aliceSweepTx.TxIn, 1)
require.Len(ht, aliceSweepTx.TxOut, 1)
- // We now check Alice's sweeping tx to see if it's already published.
- //
- // TODO(yy): remove this check once we have better block control.
- aliceSweeps = ht.AssertNumPendingSweeps(alice, 2)
- aliceCommit = aliceSweeps[0]
- if aliceCommit.AmountSat < aliceSweeps[1].AmountSat {
- aliceCommit = aliceSweeps[1]
- }
- if aliceCommit.BroadcastAttempts > 1 {
- ht.Logf("!!! Alice's commit sweep has already been broadcast, "+
- "broadcast_attempts=%v", aliceCommit.BroadcastAttempts)
- alicePosition = aliceCommit.BroadcastAttempts
- }
-
// Alice's sweeping tx should use the min relay fee rate as there's no
// deadline pressure.
aliceStartingFeeRate := chainfee.FeePerKwFloor
@@ -1567,7 +1423,7 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) {
aliceTxWeight := uint64(ht.CalculateTxWeight(aliceSweepTx))
aliceEndingFeeRate := sweep.DefaultMaxFeeRate.FeePerKWeight()
aliceFeeRateDelta := (aliceEndingFeeRate - aliceStartingFeeRate) /
- chainfee.SatPerKWeight(deadlineA-1)
+ chainfee.SatPerKWeight(deadlineA)
aliceFeeRate := ht.CalculateTxFeeRate(aliceSweepTx)
expectedFeeRateAlice := aliceStartingFeeRate +
@@ -1576,119 +1432,41 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) {
uint64(aliceFeeRate), 0.02, "want %v, got %v",
expectedFeeRateAlice, aliceFeeRate)
- // We now check Bob' sweeping tx.
- //
- // The above mined block will trigger Bob's sweeper to RBF his previous
- // sweeping tx, which will fail due to RBF rule#4 - the additional fees
- // paid are not sufficient. This happens as our default incremental
- // relay fee rate is 1 sat/vb, with the tx size of 771 weight units, or
- // 192 vbytes, we need to pay at least 192 sats more to be able to RBF.
- // However, since Bob's budget delta is (100_000 + 330) * 0.5 / 1008 =
- // 49.77 sats, it means Bob can only perform a successful RBF every 4
- // blocks.
- //
- // Assert Bob's sweeping tx is not RBFed.
- bobFeeRate := ht.CalculateTxFeeRate(bobSweepTx)
- expectedFeeRateBob := bobStartFeeRate
- require.InEpsilonf(ht, uint64(expectedFeeRateBob), uint64(bobFeeRate),
- 0.01, "want %d, got %d", expectedFeeRateBob, bobFeeRate)
-
- // reloclateAlicePosition is a temp hack to find the actual fee
- // function position used for Alice. Due to block sync issue among the
- // subsystems, we can end up having this situation:
- // - sweeper is at block 2, starts sweeping an input with deadline 100.
- // - fee bumper is at block 1, and thinks the conf target is 99.
- // - new block 3 arrives, the func now is at position 2.
+ // We now check Bob's sweeping tx.
//
- // TODO(yy): fix it using `blockbeat`.
- reloclateAlicePosition := func() {
- // Mine an empty block to trigger the possible RBF attempts.
- ht.MineEmptyBlocks(1)
+ // Bob's sweeping tx should have one input, which is his commit output.
+ // His anchor output won't be swept due to it being uneconomical.
+ require.Len(ht, bobSweepTx.TxIn, 1, "tx=%v", bobSweepTx.TxHash())
- // Increase the positions for both fee functions.
- alicePosition++
- bobPosition++
-
- // We expect two pending sweeps for both nodes as we are mining
- // empty blocks.
- ht.AssertNumPendingSweeps(alice, 2)
- ht.AssertNumPendingSweeps(bob, 2)
-
- // We expect to see both Alice's and Bob's sweeping txns in the
- // mempool.
- ht.AssertNumTxsInMempool(2)
-
- // Make sure Alice's old sweeping tx has been removed from the
- // mempool.
- ht.AssertTxNotInMempool(aliceSweepTx.TxHash())
-
- // We should see two txns in the mempool:
- // - Alice's sweeping tx, which sweeps both her anchor and
- // commit outputs, using the increased fee rate.
- // - Bob's previous sweeping tx, which sweeps both his anchor
- // and commit outputs, at the possible increased fee rate.
- txns = ht.GetNumTxsFromMempool(2)
-
- // Assume the first tx is Alice's sweeping tx, if the second tx
- // has a larger output value, then that's Alice's as her
- // to_local value is much gearter.
- aliceSweepTx = txns[0]
- bobSweepTx = txns[1]
-
- // Swap them if bobSweepTx is smaller.
- if bobSweepTx.TxOut[0].Value > aliceSweepTx.TxOut[0].Value {
- aliceSweepTx, bobSweepTx = bobSweepTx, aliceSweepTx
- }
-
- // Alice's sweeping tx should be increased.
- aliceFeeRate := ht.CalculateTxFeeRate(aliceSweepTx)
- expectedFeeRate := aliceStartingFeeRate +
- aliceFeeRateDelta*chainfee.SatPerKWeight(alicePosition)
-
- ht.Logf("Alice(deadline=%v): txWeight=%v, want feerate=%v, "+
- "got feerate=%v, delta=%v", deadlineA-alicePosition,
- aliceTxWeight, expectedFeeRate, aliceFeeRate,
- aliceFeeRateDelta)
-
- nextPosition := alicePosition + 1
- nextFeeRate := aliceStartingFeeRate +
- aliceFeeRateDelta*chainfee.SatPerKWeight(nextPosition)
-
- // Calculate the distances.
- delta := math.Abs(float64(aliceFeeRate - expectedFeeRate))
- deltaNext := math.Abs(float64(aliceFeeRate - nextFeeRate))
-
- // Exit early if the first distance is smaller - it means we
- // are at the right fee func position.
- if delta < deltaNext {
- require.InEpsilonf(ht, uint64(expectedFeeRate),
- uint64(aliceFeeRate), 0.02, "want %v, got %v "+
- "in tx=%v", expectedFeeRate,
- aliceFeeRate, aliceSweepTx.TxHash())
-
- return
- }
-
- alicePosition++
- ht.Logf("Jump position for Alice(deadline=%v): txWeight=%v, "+
- "want feerate=%v, got feerate=%v, delta=%v",
- deadlineA-alicePosition, aliceTxWeight, nextFeeRate,
- aliceFeeRate, aliceFeeRateDelta)
+ // Because Bob is sweeping without deadline pressure, the starting fee
+ // rate should be the min relay fee rate.
+ bobStartFeeRate := ht.CalculateTxFeeRate(bobSweepTx)
+ require.InEpsilonf(ht, uint64(chainfee.FeePerKwFloor),
+ uint64(bobStartFeeRate), 0.01, "want %v, got %v",
+ chainfee.FeePerKwFloor, bobStartFeeRate)
- require.InEpsilonf(ht, uint64(nextFeeRate),
- uint64(aliceFeeRate), 0.02, "want %v, got %v in tx=%v",
- nextFeeRate, aliceFeeRate, aliceSweepTx.TxHash())
- }
+ // With Bob's starting fee rate being validated, we now calculate his
+ // ending fee rate and fee rate delta.
+ //
+ // Bob sweeps one input - the commit output.
+ bobValue := btcutil.Amount(bobToLocal)
+ bobBudget := bobValue.MulF64(contractcourt.DefaultBudgetRatio)
- reloclateAlicePosition()
+ // Calculate the ending fee rate and fee rate delta used in his fee
+ // function.
+ bobTxWeight := ht.CalculateTxWeight(bobSweepTx)
+ bobEndingFeeRate := chainfee.NewSatPerKWeight(bobBudget, bobTxWeight)
+ bobFeeRateDelta := (bobEndingFeeRate - bobStartFeeRate) /
+ chainfee.SatPerKWeight(deadlineB-1)
+ expectedFeeRateBob := bobStartFeeRate
- // We now mine 7 empty blocks. For each block mined, we'd see Alice's
+ // We now mine 8 empty blocks. For each block mined, we'd see Alice's
// sweeping tx being RBFed. For Bob, he performs a fee bump every
- // block, but will only publish a tx every 4 blocks mined as some of
+ // block, but will only publish a tx every 3 blocks mined as some of
// the fee bumps is not sufficient to meet the fee requirements
// enforced by RBF. Since his fee function is already at position 1,
// mining 7 more blocks means he will RBF his sweeping tx twice.
- for i := 1; i < 7; i++ {
+ for i := 1; i < 9; i++ {
// Mine an empty block to trigger the possible RBF attempts.
ht.MineEmptyBlocks(1)
@@ -1711,9 +1489,9 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) {
// Make sure Bob's old sweeping tx has been removed from the
// mempool. Since Bob's sweeping tx will only be successfully
- // RBFed every 4 blocks, his old sweeping tx only will be
- // removed when there are 4 blocks increased.
- if bobPosition%4 == 0 {
+ // RBFed every 3 blocks, his old sweeping tx only will be
+ // removed when there are 3 blocks increased.
+ if bobPosition%3 == 0 {
ht.AssertTxNotInMempool(bobSweepTx.TxHash())
}
@@ -1749,9 +1527,10 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) {
aliceFeeRateDelta*chainfee.SatPerKWeight(alicePosition)
ht.Logf("Alice(deadline=%v): txWeight=%v, want feerate=%v, "+
- "got feerate=%v, delta=%v", deadlineA-alicePosition,
- aliceTxWeight, expectedFeeRateAlice, aliceFeeRate,
- aliceFeeRateDelta)
+ "got feerate=%v, delta=%v in tx %v",
+ deadlineA-alicePosition, aliceTxWeight,
+ expectedFeeRateAlice, aliceFeeRate,
+ aliceFeeRateDelta, aliceSweepTx.TxHash())
require.InEpsilonf(ht, uint64(expectedFeeRateAlice),
uint64(aliceFeeRate), 0.02, "want %v, got %v in tx=%v",
@@ -1767,16 +1546,17 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) {
accumulatedDelta := bobFeeRateDelta *
chainfee.SatPerKWeight(bobPosition)
- // Bob's sweeping tx will only be successfully RBFed every 4
+ // Bob's sweeping tx will only be successfully RBFed every 3
// blocks.
- if bobPosition%4 == 0 {
+ if bobPosition%3 == 0 {
expectedFeeRateBob = bobStartFeeRate + accumulatedDelta
}
ht.Logf("Bob(deadline=%v): txWeight=%v, want feerate=%v, "+
- "got feerate=%v, delta=%v", deadlineB-bobPosition,
- bobTxWeight, expectedFeeRateBob, bobFeeRate,
- bobFeeRateDelta)
+ "got feerate=%v, delta=%v in tx %v",
+ deadlineB-bobPosition, bobTxWeight,
+ expectedFeeRateBob, bobFeeRate,
+ bobFeeRateDelta, bobSweepTx.TxHash())
require.InEpsilonf(ht, uint64(expectedFeeRateBob),
uint64(bobFeeRate), 0.02, "want %d, got %d in tx=%v",
@@ -1792,7 +1572,9 @@ func testSweepCommitOutputAndAnchor(ht *lntest.HarnessTest) {
// CPFP, then RBF. Along the way, we check the `BumpFee` can properly update
// the fee function used by supplying new params.
func testBumpFee(ht *lntest.HarnessTest) {
- runBumpFee(ht, ht.Alice)
+ alice := ht.NewNodeWithCoins("Alice", nil)
+
+ runBumpFee(ht, alice)
}
// runBumpFee checks the `BumpFee` RPC can properly bump the fee of a given
@@ -2115,6 +1897,7 @@ func testBumpForceCloseFee(ht *lntest.HarnessTest) {
if ht.IsNeutrinoBackend() {
ht.Skipf("skipping BumpForceCloseFee test for neutrino backend")
}
+
// fundAmt is the funding amount.
fundAmt := btcutil.Amount(1_000_000)
@@ -2140,6 +1923,7 @@ func testBumpForceCloseFee(ht *lntest.HarnessTest) {
// Unwrap the results.
chanPoint := chanPoints[0]
alice := nodes[0]
+ bob := nodes[1]
// We need to fund alice with 2 wallet inputs so that we can test to
// increase the fee rate of the anchor cpfp via two subsequent calls of
@@ -2252,6 +2036,10 @@ func testBumpForceCloseFee(ht *lntest.HarnessTest) {
txns = ht.GetNumTxsFromMempool(2)
ht.FindSweepingTxns(txns, 1, closingTx.TxHash())
+ // Shut down Bob, otherwise he will create a sweeping tx to collect the
+ // to_remote output once Alice's force closing tx is confirmed below.
+ ht.Shutdown(bob)
+
// Mine both transactions, the closing tx and the anchor cpfp tx.
// This is needed to clean up the mempool.
ht.MineBlocksAndAssertNumTxes(1, 2)
diff --git a/itest/lnd_switch_test.go b/itest/lnd_switch_test.go
index f1ac628cab..10be216130 100644
--- a/itest/lnd_switch_test.go
+++ b/itest/lnd_switch_test.go
@@ -29,7 +29,6 @@ func testSwitchCircuitPersistence(ht *lntest.HarnessTest) {
// Setup our test scenario. We should now have four nodes running with
// three channels.
s := setupScenarioFourNodes(ht)
- defer s.cleanUp()
// Restart the intermediaries and the sender.
ht.RestartNode(s.dave)
@@ -99,13 +98,12 @@ func testSwitchOfflineDelivery(ht *lntest.HarnessTest) {
// Setup our test scenario. We should now have four nodes running with
// three channels.
s := setupScenarioFourNodes(ht)
- defer s.cleanUp()
// First, disconnect Dave and Alice so that their link is broken.
ht.DisconnectNodes(s.dave, s.alice)
// Then, reconnect them to ensure Dave doesn't just fail back the htlc.
- ht.ConnectNodes(s.dave, s.alice)
+ ht.EnsureConnected(s.dave, s.alice)
// Wait to ensure that the payment remain are not failed back after
// reconnecting. All node should report the number payments initiated
@@ -175,7 +173,6 @@ func testSwitchOfflineDeliveryPersistence(ht *lntest.HarnessTest) {
// Setup our test scenario. We should now have four nodes running with
// three channels.
s := setupScenarioFourNodes(ht)
- defer s.cleanUp()
// Disconnect the two intermediaries, Alice and Dave, by shutting down
// Alice.
@@ -264,7 +261,6 @@ func testSwitchOfflineDeliveryOutgoingOffline(ht *lntest.HarnessTest) {
// three channels. Note that we won't call the cleanUp function here as
// we will manually stop the node Carol and her channel.
s := setupScenarioFourNodes(ht)
- defer s.cleanUp()
// Disconnect the two intermediaries, Alice and Dave, so that when carol
// restarts, the response will be held by Dave.
@@ -355,8 +351,6 @@ type scenarioFourNodes struct {
chanPointAliceBob *lnrpc.ChannelPoint
chanPointCarolDave *lnrpc.ChannelPoint
chanPointDaveAlice *lnrpc.ChannelPoint
-
- cleanUp func()
}
// setupScenarioFourNodes creates a topology for switch tests. It will create
@@ -383,7 +377,9 @@ func setupScenarioFourNodes(ht *lntest.HarnessTest) *scenarioFourNodes {
}
// Grab the standby nodes.
- alice, bob := ht.Alice, ht.Bob
+ alice := ht.NewNodeWithCoins("Alice", nil)
+ bob := ht.NewNodeWithCoins("bob", nil)
+ ht.ConnectNodes(alice, bob)
// As preliminary setup, we'll create two new nodes: Carol and Dave,
// such that we now have a 4 node, 3 channel topology. Dave will make
@@ -431,21 +427,9 @@ func setupScenarioFourNodes(ht *lntest.HarnessTest) *scenarioFourNodes {
// above.
ht.CompletePaymentRequestsNoWait(bob, payReqs, chanPointAliceBob)
- // Create a cleanUp to wipe the states.
- cleanUp := func() {
- if ht.Failed() {
- ht.Skip("Skipped cleanup for failed test")
- return
- }
-
- ht.CloseChannel(alice, chanPointAliceBob)
- ht.CloseChannel(dave, chanPointDaveAlice)
- ht.CloseChannel(carol, chanPointCarolDave)
- }
-
s := &scenarioFourNodes{
alice, bob, carol, dave, chanPointAliceBob,
- chanPointCarolDave, chanPointDaveAlice, cleanUp,
+ chanPointCarolDave, chanPointDaveAlice,
}
// Wait until all nodes in the network have 5 outstanding htlcs.
diff --git a/itest/lnd_taproot_test.go b/itest/lnd_taproot_test.go
index b30f8cdaab..ddbfb6983a 100644
--- a/itest/lnd_taproot_test.go
+++ b/itest/lnd_taproot_test.go
@@ -46,37 +46,50 @@ var (
))
)
-// testTaproot ensures that the daemon can send to and spend from taproot (p2tr)
-// outputs.
-func testTaproot(ht *lntest.HarnessTest) {
- testTaprootSendCoinsKeySpendBip86(ht, ht.Alice)
- testTaprootComputeInputScriptKeySpendBip86(ht, ht.Alice)
- testTaprootSignOutputRawScriptSpend(ht, ht.Alice)
+// testTaprootSpend ensures that the daemon can send to and spend from taproot
+// (p2tr) outputs.
+func testTaprootSpend(ht *lntest.HarnessTest) {
+ alice := ht.NewNode("Alice", nil)
+
+ testTaprootSendCoinsKeySpendBip86(ht, alice)
+ testTaprootComputeInputScriptKeySpendBip86(ht, alice)
+ testTaprootSignOutputRawScriptSpend(ht, alice)
testTaprootSignOutputRawScriptSpend(
- ht, ht.Alice, txscript.SigHashSingle,
+ ht, alice, txscript.SigHashSingle,
)
- testTaprootSignOutputRawKeySpendBip86(ht, ht.Alice)
+ testTaprootSignOutputRawKeySpendBip86(ht, alice)
testTaprootSignOutputRawKeySpendBip86(
- ht, ht.Alice, txscript.SigHashSingle,
+ ht, alice, txscript.SigHashSingle,
)
- testTaprootSignOutputRawKeySpendRootHash(ht, ht.Alice)
+ testTaprootSignOutputRawKeySpendRootHash(ht, alice)
+}
+
+// testTaprootMuSig2 ensures that the daemon can send to and spend from taproot
+// (p2tr) outputs using musig2.
+func testTaprootMuSig2(ht *lntest.HarnessTest) {
+ alice := ht.NewNodeWithCoins("Alice", nil)
muSig2Versions := []signrpc.MuSig2Version{
signrpc.MuSig2Version_MUSIG2_VERSION_V040,
signrpc.MuSig2Version_MUSIG2_VERSION_V100RC2,
}
for _, version := range muSig2Versions {
- testTaprootMuSig2KeySpendBip86(ht, ht.Alice, version)
- testTaprootMuSig2KeySpendRootHash(ht, ht.Alice, version)
- testTaprootMuSig2ScriptSpend(ht, ht.Alice, version)
- testTaprootMuSig2CombinedLeafKeySpend(ht, ht.Alice, version)
- testMuSig2CombineKey(ht, ht.Alice, version)
+ testTaprootMuSig2KeySpendBip86(ht, alice, version)
+ testTaprootMuSig2KeySpendRootHash(ht, alice, version)
+ testTaprootMuSig2ScriptSpend(ht, alice, version)
+ testTaprootMuSig2CombinedLeafKeySpend(ht, alice, version)
+ testMuSig2CombineKey(ht, alice, version)
}
+}
+
+// testTaprootImportScripts ensures that the daemon can import taproot scripts.
+func testTaprootImportScripts(ht *lntest.HarnessTest) {
+ alice := ht.NewNodeWithCoins("Alice", nil)
- testTaprootImportTapscriptFullTree(ht, ht.Alice)
- testTaprootImportTapscriptPartialReveal(ht, ht.Alice)
- testTaprootImportTapscriptRootHashOnly(ht, ht.Alice)
- testTaprootImportTapscriptFullKey(ht, ht.Alice)
+ testTaprootImportTapscriptFullTree(ht, alice)
+ testTaprootImportTapscriptPartialReveal(ht, alice)
+ testTaprootImportTapscriptRootHashOnly(ht, alice)
+ testTaprootImportTapscriptFullKey(ht, alice)
}
// testTaprootSendCoinsKeySpendBip86 tests sending to and spending from
diff --git a/itest/lnd_test.go b/itest/lnd_test.go
index 5b9105694d..4d56b73fb8 100644
--- a/itest/lnd_test.go
+++ b/itest/lnd_test.go
@@ -8,7 +8,6 @@ import (
"os"
"path/filepath"
"runtime"
- "strings"
"testing"
"time"
@@ -20,6 +19,7 @@ import (
"github.com/lightningnetwork/lnd/lntest/port"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
+ "golang.org/x/exp/rand"
"google.golang.org/grpc/grpclog"
)
@@ -61,6 +61,13 @@ var (
"0-based index specified by the -runtranche flag",
)
+ // shuffleSeedFlag is the source of randomness used to shuffle the test
+ // cases. If not specified, the test cases won't be shuffled.
+ shuffleSeedFlag = flag.Uint64(
+ "shuffleseed", 0, "if set, shuffles the test cases using this "+
+ "as the source of randomness",
+ )
+
// testCasesRunTranche is the 0-based index of the split test cases
// tranche to run in the current invocation.
testCasesRunTranche = flag.Uint(
@@ -102,9 +109,8 @@ func TestLightningNetworkDaemon(t *testing.T) {
)
defer harnessTest.Stop()
- // Setup standby nodes, Alice and Bob, which will be alive and shared
- // among all the test cases.
- harnessTest.SetupStandbyNodes()
+ // Get the current block height.
+ height := harnessTest.CurrentHeight()
// Run the subset of the test cases selected in this tranche.
for idx, testCase := range testCases {
@@ -119,22 +125,7 @@ func TestLightningNetworkDaemon(t *testing.T) {
// avoid overwriting the external harness test that is
// tied to the parent test.
ht := harnessTest.Subtest(t1)
-
- // TODO(yy): split log files.
- cleanTestCaseName := strings.ReplaceAll(
- testCase.Name, " ", "_",
- )
- ht.SetTestName(cleanTestCaseName)
-
- logLine := fmt.Sprintf(
- "STARTING ============ %v ============\n",
- testCase.Name,
- )
-
- ht.Alice.AddToLogf(logLine)
- ht.Bob.AddToLogf(logLine)
-
- ht.EnsureConnected(ht.Alice, ht.Bob)
+ ht.SetTestName(testCase.Name)
ht.RunTestCase(testCase)
})
@@ -151,9 +142,35 @@ func TestLightningNetworkDaemon(t *testing.T) {
}
}
- height := harnessTest.CurrentHeight()
- t.Logf("=========> tests finished for tranche: %v, tested %d "+
- "cases, end height: %d\n", trancheIndex, len(testCases), height)
+ fmt.Printf("=========> tranche %v finished, tested %d cases, mined "+
+ "blocks: %d\n", trancheIndex, len(testCases),
+ harnessTest.CurrentHeight()-height)
+}
+
+// maybeShuffleTestCases shuffles the test cases if the flag `shuffleseed` is
+// set and not 0. In parallel tests we want to shuffle the test cases so they
+// are executed in a random order. This is done to even out the blocks mined in
+// each test tranche so they can run faster.
+//
+// NOTE: Because the parallel tests are initialized with the same seed (job
+// ID), they will always have the same order.
+func maybeShuffleTestCases() {
+ // Exit if not set.
+ if shuffleSeedFlag == nil {
+ return
+ }
+
+ // Exit if set to 0.
+ if *shuffleSeedFlag == 0 {
+ return
+ }
+
+ // Init the seed and shuffle the test cases.
+ rand.Seed(*shuffleSeedFlag)
+ rand.Shuffle(len(allTestCases), func(i, j int) {
+ allTestCases[i], allTestCases[j] =
+ allTestCases[j], allTestCases[i]
+ })
}
// getTestCaseSplitTranche returns the sub slice of the test cases that should
@@ -178,6 +195,9 @@ func getTestCaseSplitTranche() ([]*lntest.TestCase, uint, uint) {
runTranche = 0
}
+ // Shuffle the test cases if the `shuffleseed` flag is set.
+ maybeShuffleTestCases()
+
numCases := uint(len(allTestCases))
testsPerTranche := numCases / numTranches
trancheOffset := runTranche * testsPerTranche
@@ -212,6 +232,11 @@ func getLndBinary(t *testing.T) string {
return binary
}
+// isWindowsOS returns true if the test is running on a Windows OS.
+func isWindowsOS() bool {
+ return runtime.GOOS == "windows"
+}
+
func init() {
// Before we start any node, we need to make sure that any btcd node
// that is started through the RPC harness uses a unique port as well
diff --git a/itest/lnd_trackpayments_test.go b/itest/lnd_trackpayments_test.go
index b942cd4c72..29fb2c0b3a 100644
--- a/itest/lnd_trackpayments_test.go
+++ b/itest/lnd_trackpayments_test.go
@@ -2,7 +2,6 @@ package itest
import (
"encoding/hex"
- "time"
"github.com/btcsuite/btcd/btcutil"
"github.com/lightningnetwork/lnd/lnrpc"
@@ -14,21 +13,18 @@ import (
// testTrackPayments tests whether a client that calls the TrackPayments api
// receives payment updates.
func testTrackPayments(ht *lntest.HarnessTest) {
- alice, bob := ht.Alice, ht.Bob
-
- // Restart Alice with the new flag so she understands the new payment
+ // Create Alice with the new flag so she understands the new payment
// status.
- ht.RestartNodeWithExtraArgs(alice, []string{
- "--routerrpc.usestatusinitiated",
- })
+ cfgAlice := []string{"--routerrpc.usestatusinitiated"}
+ cfgs := [][]string{cfgAlice, nil}
- // Open a channel between alice and bob.
- ht.EnsureConnected(alice, bob)
- channel := ht.OpenChannel(
- alice, bob, lntest.OpenChannelParams{
+ // Create a channel Alice->Bob.
+ _, nodes := ht.CreateSimpleNetwork(
+ cfgs, lntest.OpenChannelParams{
Amt: btcutil.Amount(300000),
},
)
+ alice, bob := nodes[0], nodes[1]
// Call the TrackPayments api to listen for payment updates.
req := &routerrpc.TrackPaymentsRequest{
@@ -88,28 +84,18 @@ func testTrackPayments(ht *lntest.HarnessTest) {
require.Equal(ht, amountMsat, update3.ValueMsat)
require.Equal(ht, hex.EncodeToString(invoice.RPreimage),
update3.PaymentPreimage)
-
- // TODO(yy): remove the sleep once the following bug is fixed.
- // When the invoice is reported settled, the commitment dance is not
- // yet finished, which can cause an error when closing the channel,
- // saying there's active HTLCs. We need to investigate this issue and
- // reverse the order to, first finish the commitment dance, then report
- // the invoice as settled.
- time.Sleep(2 * time.Second)
-
- ht.CloseChannel(alice, channel)
}
// testTrackPaymentsCompatible checks that when `routerrpc.usestatusinitiated`
// is not set, the new Payment_INITIATED is replaced with Payment_IN_FLIGHT.
func testTrackPaymentsCompatible(ht *lntest.HarnessTest) {
// Open a channel between alice and bob.
- alice, bob := ht.Alice, ht.Bob
- channel := ht.OpenChannel(
- alice, bob, lntest.OpenChannelParams{
+ _, nodes := ht.CreateSimpleNetwork(
+ [][]string{nil, nil}, lntest.OpenChannelParams{
Amt: btcutil.Amount(300000),
},
)
+ alice, bob := nodes[0], nodes[1]
// Call the TrackPayments api to listen for payment updates.
req := &routerrpc.TrackPaymentsRequest{
@@ -163,14 +149,4 @@ func testTrackPaymentsCompatible(ht *lntest.HarnessTest) {
payment3, err := paymentClient.Recv()
require.NoError(ht, err, "unable to get payment update")
require.Equal(ht, lnrpc.Payment_SUCCEEDED, payment3.Status)
-
- // TODO(yy): remove the sleep once the following bug is fixed.
- // When the invoice is reported settled, the commitment dance is not
- // yet finished, which can cause an error when closing the channel,
- // saying there's active HTLCs. We need to investigate this issue and
- // reverse the order to, first finish the commitment dance, then report
- // the invoice as settled.
- time.Sleep(2 * time.Second)
-
- ht.CloseChannel(alice, channel)
}
diff --git a/itest/lnd_wallet_import_test.go b/itest/lnd_wallet_import_test.go
index f4acc6365b..deb8994687 100644
--- a/itest/lnd_wallet_import_test.go
+++ b/itest/lnd_wallet_import_test.go
@@ -23,6 +23,55 @@ import (
"github.com/stretchr/testify/require"
)
+// walletImportAccountTestCases tests that an imported account can fund
+// transactions and channels through PSBTs, by having one node (the one with
+// the imported account) craft the transactions and another node act as the
+// signer.
+//
+//nolint:lll
+var walletImportAccountTestCases = []*lntest.TestCase{
+ {
+ Name: "wallet import account standard BIP-0044",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ testWalletImportAccountScenario(
+ ht, walletrpc.AddressType_WITNESS_PUBKEY_HASH,
+ )
+ },
+ },
+ {
+ Name: "wallet import account standard BIP-0049",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ testWalletImportAccountScenario(
+ ht, walletrpc.AddressType_NESTED_WITNESS_PUBKEY_HASH,
+ )
+ },
+ },
+ {
+ Name: "wallet import account lnd BIP-0049 variant",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ testWalletImportAccountScenario(
+ ht, walletrpc.AddressType_HYBRID_NESTED_WITNESS_PUBKEY_HASH,
+ )
+ },
+ },
+ {
+ Name: "wallet import account standard BIP-0084",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ testWalletImportAccountScenario(
+ ht, walletrpc.AddressType_WITNESS_PUBKEY_HASH,
+ )
+ },
+ },
+ {
+ Name: "wallet import account standard BIP-0086",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ testWalletImportAccountScenario(
+ ht, walletrpc.AddressType_TAPROOT_PUBKEY,
+ )
+ },
+ },
+}
+
const (
defaultAccount = lnwallet.DefaultAccountName
defaultImportedAccount = waddrmgr.ImportedAddrAccountName
@@ -452,65 +501,6 @@ func fundChanAndCloseFromImportedAccount(ht *lntest.HarnessTest, srcNode,
}
}
-// testWalletImportAccount tests that an imported account can fund transactions
-// and channels through PSBTs, by having one node (the one with the imported
-// account) craft the transactions and another node act as the signer.
-func testWalletImportAccount(ht *lntest.HarnessTest) {
- testCases := []struct {
- name string
- addrType walletrpc.AddressType
- }{
- {
- name: "standard BIP-0044",
- addrType: walletrpc.AddressType_WITNESS_PUBKEY_HASH,
- },
- {
- name: "standard BIP-0049",
- addrType: walletrpc.
- AddressType_NESTED_WITNESS_PUBKEY_HASH,
- },
- {
- name: "lnd BIP-0049 variant",
- addrType: walletrpc.
- AddressType_HYBRID_NESTED_WITNESS_PUBKEY_HASH,
- },
- {
- name: "standard BIP-0084",
- addrType: walletrpc.AddressType_WITNESS_PUBKEY_HASH,
- },
- {
- name: "standard BIP-0086",
- addrType: walletrpc.AddressType_TAPROOT_PUBKEY,
- },
- }
-
- for _, tc := range testCases {
- tc := tc
- success := ht.Run(tc.name, func(tt *testing.T) {
- testFunc := func(ht *lntest.HarnessTest) {
- testWalletImportAccountScenario(
- ht, tc.addrType,
- )
- }
-
- st := ht.Subtest(tt)
-
- st.RunTestCase(&lntest.TestCase{
- Name: tc.name,
- TestFunc: testFunc,
- })
- })
- if !success {
- // Log failure time to help relate the lnd logs to the
- // failure.
- ht.Logf("Failure time: %v", time.Now().Format(
- "2006-01-02 15:04:05.000",
- ))
- break
- }
- }
-}
-
func testWalletImportAccountScenario(ht *lntest.HarnessTest,
addrType walletrpc.AddressType) {
@@ -582,7 +572,7 @@ func runWalletImportAccountScenario(ht *lntest.HarnessTest,
// Send coins to Carol's address and confirm them, making sure the
// balance updates accordingly.
- alice := ht.Alice
+ alice := ht.NewNodeWithCoins("Alice", nil)
req := &lnrpc.SendCoinsRequest{
Addr: externalAddr,
Amount: utxoAmt,
@@ -694,7 +684,7 @@ func testWalletImportPubKeyScenario(ht *lntest.HarnessTest,
addrType walletrpc.AddressType) {
const utxoAmt int64 = btcutil.SatoshiPerBitcoin
- alice := ht.Alice
+ alice := ht.NewNodeWithCoins("Alice", nil)
// We'll start our test by having two nodes, Carol and Dave.
//
diff --git a/itest/lnd_watchtower_test.go b/itest/lnd_watchtower_test.go
index 0a36b077d2..1f5ad6ea31 100644
--- a/itest/lnd_watchtower_test.go
+++ b/itest/lnd_watchtower_test.go
@@ -19,27 +19,28 @@ import (
"github.com/stretchr/testify/require"
)
-// testWatchtower tests the behaviour of the watchtower client and server.
-func testWatchtower(ht *lntest.HarnessTest) {
- ht.Run("revocation", func(t *testing.T) {
- tt := ht.Subtest(t)
- testRevokedCloseRetributionAltruistWatchtower(tt)
- })
-
- ht.Run("session deletion", func(t *testing.T) {
- tt := ht.Subtest(t)
- testTowerClientSessionDeletion(tt)
- })
-
- ht.Run("tower and session activation", func(t *testing.T) {
- tt := ht.Subtest(t)
- testTowerClientTowerAndSessionManagement(tt)
- })
+// watchtowerTestCases defines a set of tests to check the behaviour of the
+// watchtower client and server.
+var watchtowerTestCases = []*lntest.TestCase{
+ {
+ Name: "watchtower revoked close retribution altruist",
+ TestFunc: testRevokedCloseRetributionAltruistWatchtower,
+ },
+ {
+ Name: "watchtower client session deletion",
+ TestFunc: testTowerClientSessionDeletion,
+ },
+ {
+ Name: "watchtower client tower and session management",
+ TestFunc: testTowerClientTowerAndSessionManagement,
+ },
}
// testTowerClientTowerAndSessionManagement tests the various control commands
// that a user has over the client's set of active towers and sessions.
func testTowerClientTowerAndSessionManagement(ht *lntest.HarnessTest) {
+ alice := ht.NewNode("Alice", nil)
+
const (
chanAmt = funding.MaxBtcFundingAmount
externalIP = "1.2.3.4"
@@ -105,13 +106,13 @@ func testTowerClientTowerAndSessionManagement(ht *lntest.HarnessTest) {
ht.FundCoins(btcutil.SatoshiPerBitcoin, dave)
// Connect Dave and Alice.
- ht.ConnectNodes(dave, ht.Alice)
+ ht.ConnectNodes(dave, alice)
// Open a channel between Dave and Alice.
params := lntest.OpenChannelParams{
Amt: chanAmt,
}
- chanPoint := ht.OpenChannel(dave, ht.Alice, params)
+ chanPoint := ht.OpenChannel(dave, alice, params)
// Show that the Wallis tower is currently seen as an active session
// candidate.
@@ -123,7 +124,7 @@ func testTowerClientTowerAndSessionManagement(ht *lntest.HarnessTest) {
// Make some back-ups and assert that they are added to a session with
// the tower.
- generateBackups(ht, dave, ht.Alice, 4)
+ generateBackups(ht, dave, alice, 4)
// Assert that one of the sessions now has 4 backups.
assertNumBackups(ht, dave.RPC, wallisPk, 4, false)
@@ -140,7 +141,7 @@ func testTowerClientTowerAndSessionManagement(ht *lntest.HarnessTest) {
require.False(ht, info.SessionInfo[0].ActiveSessionCandidate)
// Back up a few more states.
- generateBackups(ht, dave, ht.Alice, 4)
+ generateBackups(ht, dave, alice, 4)
// These should _not_ be on the tower. Therefore, the number of
// back-ups on the tower should be the same as before.
@@ -164,7 +165,7 @@ func testTowerClientTowerAndSessionManagement(ht *lntest.HarnessTest) {
})
// Generate some more back-ups.
- generateBackups(ht, dave, ht.Alice, 4)
+ generateBackups(ht, dave, alice, 4)
// Assert that they get added to the first tower (Wallis) and that the
// number of sessions with Wallis has not changed - in other words, the
@@ -206,7 +207,7 @@ func testTowerClientTowerAndSessionManagement(ht *lntest.HarnessTest) {
assertNumSessions(wallisPk, 4, false)
// Any new back-ups should now be backed up on a different session.
- generateBackups(ht, dave, ht.Alice, 2)
+ generateBackups(ht, dave, alice, 2)
assertNumBackups(ht, dave.RPC, wallisPk, 10, false)
findSession(wallisPk, 2)
@@ -239,6 +240,8 @@ func testTowerClientTowerAndSessionManagement(ht *lntest.HarnessTest) {
// testTowerClientSessionDeletion tests that sessions are correctly deleted
// when they are deemed closable.
func testTowerClientSessionDeletion(ht *lntest.HarnessTest) {
+ alice := ht.NewNode("Alice", nil)
+
const (
chanAmt = funding.MaxBtcFundingAmount
numInvoices = 5
@@ -291,18 +294,18 @@ func testTowerClientSessionDeletion(ht *lntest.HarnessTest) {
ht.FundCoins(btcutil.SatoshiPerBitcoin, dave)
// Connect Dave and Alice.
- ht.ConnectNodes(dave, ht.Alice)
+ ht.ConnectNodes(dave, alice)
// Open a channel between Dave and Alice.
params := lntest.OpenChannelParams{
Amt: chanAmt,
}
- chanPoint := ht.OpenChannel(dave, ht.Alice, params)
+ chanPoint := ht.OpenChannel(dave, alice, params)
// Since there are 2 updates made for every payment and the maximum
// number of updates per session has been set to 10, make 5 payments
// between the pair so that the session is exhausted.
- generateBackups(ht, dave, ht.Alice, maxUpdates)
+ generateBackups(ht, dave, alice, maxUpdates)
// Assert that one of the sessions now has 10 backups.
assertNumBackups(ht, dave.RPC, wallisPk, 10, false)
@@ -392,7 +395,7 @@ func testRevokedCloseRetributionAltruistWatchtowerCase(ht *lntest.HarnessTest,
// protection logic automatically.
daveArgs := lntest.NodeArgsForCommitType(commitType)
daveArgs = append(daveArgs, "--nolisten", "--wtclient.active")
- dave := ht.NewNode("Dave", daveArgs)
+ dave := ht.NewNodeWithCoins("Dave", daveArgs)
addTowerReq := &wtclientrpc.AddTowerRequest{
Pubkey: willyInfoPk,
@@ -404,10 +407,6 @@ func testRevokedCloseRetributionAltruistWatchtowerCase(ht *lntest.HarnessTest,
// announcement, so we open a channel with Carol,
ht.ConnectNodes(dave, carol)
- // Before we make a channel, we'll load up Dave with some coins sent
- // directly from the miner.
- ht.FundCoins(btcutil.SatoshiPerBitcoin, dave)
-
// Send one more UTXOs if this is a neutrino backend.
if ht.IsNeutrinoBackend() {
ht.FundCoins(btcutil.SatoshiPerBitcoin, dave)
@@ -565,6 +564,15 @@ func testRevokedCloseRetributionAltruistWatchtowerCase(ht *lntest.HarnessTest,
// then been swept to his wallet by Willy.
require.NoError(ht, restart(), "unable to restart dave")
+ // For neutrino backend, we may need to mine one more block to trigger
+ // the chain watcher to act.
+ //
+ // TODO(yy): remove it once the blockbeat remembers the last block
+ // processed.
+ if ht.IsNeutrinoBackend() {
+ ht.MineEmptyBlocks(1)
+ }
+
err = wait.NoError(func() error {
daveBalResp := dave.RPC.ChannelBalance()
if daveBalResp.LocalBalance.Sat != 0 {
@@ -579,16 +587,6 @@ func testRevokedCloseRetributionAltruistWatchtowerCase(ht *lntest.HarnessTest,
ht.AssertNumPendingForceClose(dave, 0)
- // If this is an anchor channel, Dave would offer his sweeper the
- // anchor. However, due to no time-sensitive outputs involved, the
- // anchor sweeping won't happen as it's uneconomical.
- if lntest.CommitTypeHasAnchors(commitType) {
- ht.AssertNumPendingSweeps(dave, 1)
-
- // Mine a block to trigger the sweep.
- ht.MineEmptyBlocks(1)
- }
-
// Check that Dave's wallet balance is increased.
err = wait.NoError(func() error {
daveBalResp := dave.RPC.WalletBalance()
@@ -657,17 +655,12 @@ func generateBackups(ht *lntest.HarnessTest, srcNode,
)
send := func(node *node.HarnessNode, payReq string) {
- stream := node.RPC.SendPayment(
- &routerrpc.SendPaymentRequest{
- PaymentRequest: payReq,
- TimeoutSeconds: 60,
- FeeLimitMsat: noFeeLimitMsat,
- },
- )
-
- ht.AssertPaymentStatusFromStream(
- stream, lnrpc.Payment_SUCCEEDED,
- )
+ req := &routerrpc.SendPaymentRequest{
+ PaymentRequest: payReq,
+ TimeoutSeconds: 60,
+ FeeLimitMsat: noFeeLimitMsat,
+ }
+ ht.SendPaymentAssertSettled(node, req)
}
// Pay each invoice.
diff --git a/itest/lnd_wipe_fwdpkgs_test.go b/itest/lnd_wipe_fwdpkgs_test.go
index a302f596a6..fda9916f9a 100644
--- a/itest/lnd_wipe_fwdpkgs_test.go
+++ b/itest/lnd_wipe_fwdpkgs_test.go
@@ -27,25 +27,11 @@ func testWipeForwardingPackages(ht *lntest.HarnessTest) {
numInvoices = 3
)
- // Grab Alice and Bob from HarnessTest.
- alice, bob := ht.Alice, ht.Bob
-
- // Create a new node Carol, which will create invoices that require
- // Alice to pay.
- carol := ht.NewNode("Carol", nil)
-
- // Connect Bob to Carol.
- ht.ConnectNodes(bob, carol)
-
- // Open a channel between Alice and Bob.
- chanPointAB := ht.OpenChannel(
- alice, bob, lntest.OpenChannelParams{Amt: chanAmt},
- )
-
- // Open a channel between Bob and Carol.
- chanPointBC := ht.OpenChannel(
- bob, carol, lntest.OpenChannelParams{Amt: chanAmt},
+ chanPoints, nodes := ht.CreateSimpleNetwork(
+ [][]string{nil, nil, nil}, lntest.OpenChannelParams{Amt: chanAmt},
)
+ chanPointAB, chanPointBC := chanPoints[0], chanPoints[1]
+ alice, bob, carol := nodes[0], nodes[1], nodes[2]
// Before we continue, make sure Alice has seen the channel between Bob
// and Carol.
@@ -117,12 +103,6 @@ func testWipeForwardingPackages(ht *lntest.HarnessTest) {
// Alice should one pending sweep.
ht.AssertNumPendingSweeps(alice, 1)
- // Mine a block to trigger the sweep.
- ht.MineBlocks(1)
-
// Mine 1 block to get Alice's sweeping tx confirmed.
ht.MineBlocksAndAssertNumTxes(1, 1)
-
- // Clean up the force closed channel.
- ht.CleanupForceClose(bob)
}
diff --git a/itest/lnd_wumbo_channels_test.go b/itest/lnd_wumbo_channels_test.go
index 18d170acc3..8bf18106e7 100644
--- a/itest/lnd_wumbo_channels_test.go
+++ b/itest/lnd_wumbo_channels_test.go
@@ -45,8 +45,7 @@ func testWumboChannels(ht *lntest.HarnessTest) {
// Creating a wumbo channel between these two nodes should succeed.
ht.EnsureConnected(wumboNode, wumboNode2)
- chanPoint := ht.OpenChannel(
+ ht.OpenChannel(
wumboNode, wumboNode2, lntest.OpenChannelParams{Amt: chanAmt},
)
- ht.CloseChannel(wumboNode, chanPoint)
}
diff --git a/itest/lnd_zero_conf_test.go b/itest/lnd_zero_conf_test.go
index 6a658ab381..5b87846be5 100644
--- a/itest/lnd_zero_conf_test.go
+++ b/itest/lnd_zero_conf_test.go
@@ -19,6 +19,67 @@ import (
"github.com/stretchr/testify/require"
)
+// zeroConfPolicyTestCases checks that option-scid-alias, zero-conf
+// channel-types, and option-scid-alias feature-bit-only channels have the
+// expected graph and that payments work when updating the channel policy.
+var zeroConfPolicyTestCases = []*lntest.TestCase{
+ {
+ Name: "channel policy update private",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ // zeroConf: false
+ // scidAlias: false
+ // private: true
+ testPrivateUpdateAlias(
+ ht, false, false, true,
+ )
+ },
+ },
+ {
+ Name: "channel policy update private scid alias",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ // zeroConf: false
+ // scidAlias: true
+ // private: true
+ testPrivateUpdateAlias(
+ ht, false, true, true,
+ )
+ },
+ },
+ {
+ Name: "channel policy update private zero conf",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ // zeroConf: true
+ // scidAlias: false
+ // private: true
+ testPrivateUpdateAlias(
+ ht, true, false, true,
+ )
+ },
+ },
+ {
+ Name: "channel policy update public zero conf",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ // zeroConf: true
+ // scidAlias: false
+ // private: false
+ testPrivateUpdateAlias(
+ ht, true, false, false,
+ )
+ },
+ },
+ {
+ Name: "channel policy update public",
+ TestFunc: func(ht *lntest.HarnessTest) {
+ // zeroConf: false
+ // scidAlias: false
+ // private: false
+ testPrivateUpdateAlias(
+ ht, false, false, false,
+ )
+ },
+ },
+}
+
// testZeroConfChannelOpen tests that opening a zero-conf channel works and
// sending payments also works.
func testZeroConfChannelOpen(ht *lntest.HarnessTest) {
@@ -395,61 +456,6 @@ func waitForZeroConfGraphChange(hn *node.HarnessNode,
}, defaultTimeout)
}
-// testUpdateChannelPolicyScidAlias checks that option-scid-alias, zero-conf
-// channel-types, and option-scid-alias feature-bit-only channels have the
-// expected graph and that payments work when updating the channel policy.
-func testUpdateChannelPolicyScidAlias(ht *lntest.HarnessTest) {
- tests := []struct {
- name string
-
- // The option-scid-alias channel type.
- scidAliasType bool
-
- // The zero-conf channel type.
- zeroConf bool
-
- private bool
- }{
- {
- name: "private scid-alias chantype update",
- scidAliasType: true,
- private: true,
- },
- {
- name: "private zero-conf update",
- zeroConf: true,
- private: true,
- },
- {
- name: "public zero-conf update",
- zeroConf: true,
- },
- {
- name: "public no-chan-type update",
- },
- {
- name: "private no-chan-type update",
- private: true,
- },
- }
-
- for _, test := range tests {
- test := test
-
- success := ht.Run(test.name, func(t *testing.T) {
- st := ht.Subtest(t)
-
- testPrivateUpdateAlias(
- st, test.zeroConf, test.scidAliasType,
- test.private,
- )
- })
- if !success {
- return
- }
- }
-}
-
func testPrivateUpdateAlias(ht *lntest.HarnessTest,
zeroConf, scidAliasType, private bool) {
@@ -621,6 +627,9 @@ func testPrivateUpdateAlias(ht *lntest.HarnessTest,
//
// TODO(yy): further investigate this sleep.
time.Sleep(time.Second * 5)
+
+ // Make sure Eve has heard about this public channel.
+ ht.AssertChannelInGraph(eve, fundingPoint2)
}
// Dave creates an invoice that Eve will pay.
@@ -753,7 +762,7 @@ func testPrivateUpdateAlias(ht *lntest.HarnessTest,
// testOptionScidUpgrade tests that toggling the option-scid-alias feature bit
// correctly upgrades existing channels.
func testOptionScidUpgrade(ht *lntest.HarnessTest) {
- bob := ht.Bob
+ bob := ht.NewNodeWithCoins("Bob", nil)
// Start carol with anchors only.
carolArgs := []string{
@@ -854,9 +863,6 @@ func testOptionScidUpgrade(ht *lntest.HarnessTest) {
daveInvoice2 := dave.RPC.AddInvoice(daveParams)
ht.CompletePaymentRequests(bob, []string{daveInvoice2.PaymentRequest})
-
- // Close standby node's channels.
- ht.CloseChannel(bob, fundingPoint2)
}
// acceptChannel is used to accept a single channel that comes across. This
diff --git a/lntest/harness.go b/lntest/harness.go
index f96a3aadd7..03b0d4b316 100644
--- a/lntest/harness.go
+++ b/lntest/harness.go
@@ -4,20 +4,24 @@ import (
"context"
"encoding/hex"
"fmt"
+ "strings"
"testing"
"time"
"github.com/btcsuite/btcd/blockchain"
+ "github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/fn"
+ "github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/kvdb/etcd"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
+ "github.com/lightningnetwork/lnd/lnrpc/signrpc"
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/lightningnetwork/lnd/lntest/miner"
"github.com/lightningnetwork/lnd/lntest/node"
@@ -26,6 +30,7 @@ import (
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/lightningnetwork/lnd/lnwire"
+ "github.com/lightningnetwork/lnd/routing"
"github.com/stretchr/testify/require"
)
@@ -49,6 +54,9 @@ const (
// maxBlocksAllowed specifies the max allowed value to be used when
// mining blocks.
maxBlocksAllowed = 100
+
+ finalCltvDelta = routing.MinCLTVDelta // 18.
+ thawHeightDelta = finalCltvDelta * 2 // 36.
)
// TestCase defines a test case that's been used in the integration test.
@@ -60,24 +68,12 @@ type TestCase struct {
TestFunc func(t *HarnessTest)
}
-// standbyNodes are a list of nodes which are created during the initialization
-// of the test and used across all test cases.
-type standbyNodes struct {
- // Alice and Bob are the initial seeder nodes that are automatically
- // created to be the initial participants of the test network.
- Alice *node.HarnessNode
- Bob *node.HarnessNode
-}
-
// HarnessTest builds on top of a testing.T with enhanced error detection. It
// is responsible for managing the interactions among different nodes, and
// providing easy-to-use assertions.
type HarnessTest struct {
*testing.T
- // Embed the standbyNodes so we can easily access them via `ht.Alice`.
- standbyNodes
-
// miner is a reference to a running full node that can be used to
// create new blocks on the network.
miner *miner.HarnessMiner
@@ -264,97 +260,6 @@ func (h *HarnessTest) createAndSendOutput(target *node.HarnessNode,
h.miner.SendOutput(output, defaultMinerFeeRate)
}
-// SetupRemoteSigningStandbyNodes starts the initial seeder nodes within the
-// test harness in a remote signing configuration. The initial node's wallets
-// will be funded wallets with 100x1 BTC outputs each.
-func (h *HarnessTest) SetupRemoteSigningStandbyNodes() {
- h.Log("Setting up standby nodes Alice and Bob with remote " +
- "signing configurations...")
- defer h.Log("Finished the setup, now running tests...")
-
- password := []byte("itestpassword")
-
- // Setup remote signing nodes for Alice and Bob.
- signerAlice := h.NewNode("SignerAlice", nil)
- signerBob := h.NewNode("SignerBob", nil)
-
- // Setup watch-only nodes for Alice and Bob, each configured with their
- // own remote signing instance.
- h.Alice = h.setupWatchOnlyNode("Alice", signerAlice, password)
- h.Bob = h.setupWatchOnlyNode("Bob", signerBob, password)
-
- // Fund each node with 100 BTC (using 100 separate transactions).
- const fundAmount = 1 * btcutil.SatoshiPerBitcoin
- const numOutputs = 100
- const totalAmount = fundAmount * numOutputs
- for _, node := range []*node.HarnessNode{h.Alice, h.Bob} {
- h.manager.standbyNodes[node.Cfg.NodeID] = node
- for i := 0; i < numOutputs; i++ {
- h.createAndSendOutput(
- node, fundAmount,
- lnrpc.AddressType_WITNESS_PUBKEY_HASH,
- )
- }
- }
-
- // We generate several blocks in order to give the outputs created
- // above a good number of confirmations.
- const totalTxes = 200
- h.MineBlocksAndAssertNumTxes(numBlocksSendOutput, totalTxes)
-
- // Now we want to wait for the nodes to catch up.
- h.WaitForBlockchainSync(h.Alice)
- h.WaitForBlockchainSync(h.Bob)
-
- // Now block until both wallets have fully synced up.
- h.WaitForBalanceConfirmed(h.Alice, totalAmount)
- h.WaitForBalanceConfirmed(h.Bob, totalAmount)
-}
-
-// SetUp starts the initial seeder nodes within the test harness. The initial
-// node's wallets will be funded wallets with 10x10 BTC outputs each.
-func (h *HarnessTest) SetupStandbyNodes() {
- h.Log("Setting up standby nodes Alice and Bob...")
- defer h.Log("Finished the setup, now running tests...")
-
- lndArgs := []string{
- "--default-remote-max-htlcs=483",
- "--channel-max-fee-exposure=5000000",
- }
-
- // Start the initial seeder nodes within the test network.
- h.Alice = h.NewNode("Alice", lndArgs)
- h.Bob = h.NewNode("Bob", lndArgs)
-
- // Load up the wallets of the seeder nodes with 100 outputs of 1 BTC
- // each.
- const fundAmount = 1 * btcutil.SatoshiPerBitcoin
- const numOutputs = 100
- const totalAmount = fundAmount * numOutputs
- for _, node := range []*node.HarnessNode{h.Alice, h.Bob} {
- h.manager.standbyNodes[node.Cfg.NodeID] = node
- for i := 0; i < numOutputs; i++ {
- h.createAndSendOutput(
- node, fundAmount,
- lnrpc.AddressType_WITNESS_PUBKEY_HASH,
- )
- }
- }
-
- // We generate several blocks in order to give the outputs created
- // above a good number of confirmations.
- const totalTxes = 200
- h.MineBlocksAndAssertNumTxes(numBlocksSendOutput, totalTxes)
-
- // Now we want to wait for the nodes to catch up.
- h.WaitForBlockchainSync(h.Alice)
- h.WaitForBlockchainSync(h.Bob)
-
- // Now block until both wallets have fully synced up.
- h.WaitForBalanceConfirmed(h.Alice, totalAmount)
- h.WaitForBalanceConfirmed(h.Bob, totalAmount)
-}
-
// Stop stops the test harness.
func (h *HarnessTest) Stop() {
// Do nothing if it's not started.
@@ -392,24 +297,6 @@ func (h *HarnessTest) RunTestCase(testCase *TestCase) {
testCase.TestFunc(h)
}
-// resetStandbyNodes resets all standby nodes by attaching the new testing.T
-// and restarting them with the original config.
-func (h *HarnessTest) resetStandbyNodes(t *testing.T) {
- t.Helper()
-
- for _, hn := range h.manager.standbyNodes {
- // Inherit the testing.T.
- h.T = t
-
- // Reset the config so the node will be using the default
- // config for the coming test. This will also inherit the
- // test's running context.
- h.RestartNodeWithExtraArgs(hn, hn.Cfg.OriginalExtraArgs)
-
- hn.AddToLogf("Finished test case %v", h.manager.currentTestCase)
- }
-}
-
// Subtest creates a child HarnessTest, which inherits the harness net and
// stand by nodes created by the parent test. It will return a cleanup function
// which resets all the standby nodes' configs back to its original state and
@@ -421,7 +308,6 @@ func (h *HarnessTest) Subtest(t *testing.T) *HarnessTest {
T: t,
manager: h.manager,
miner: h.miner,
- standbyNodes: h.standbyNodes,
feeService: h.feeService,
lndErrorChan: make(chan error, lndErrorChanSize),
}
@@ -432,9 +318,6 @@ func (h *HarnessTest) Subtest(t *testing.T) *HarnessTest {
// Inherit the subtest for the miner.
st.miner.T = st.T
- // Reset the standby nodes.
- st.resetStandbyNodes(t)
-
// Reset fee estimator.
st.feeService.Reset()
@@ -449,6 +332,9 @@ func (h *HarnessTest) Subtest(t *testing.T) *HarnessTest {
"mined blocks=%d", st.manager.currentTestCase,
startHeight, endHeight, endHeight-startHeight)
+ fmt.Printf("finished test: %s, %d, %d, %d\n", st.manager.currentTestCase,
+ startHeight, endHeight, endHeight-startHeight)
+
// Don't bother run the cleanups if the test is failed.
if st.Failed() {
st.Log("test failed, skipped cleanup")
@@ -464,14 +350,8 @@ func (h *HarnessTest) Subtest(t *testing.T) *HarnessTest {
return
}
- // When we finish the test, reset the nodes' configs and take a
- // snapshot of each of the nodes' internal states.
- for _, node := range st.manager.standbyNodes {
- st.cleanupStandbyNode(node)
- }
-
// If found running nodes, shut them down.
- st.shutdownNonStandbyNodes()
+ st.shutdownAllNodes()
// We require the mempool to be cleaned from the test.
require.Empty(st, st.miner.GetRawMempool(), "mempool not "+
@@ -491,26 +371,9 @@ func (h *HarnessTest) Subtest(t *testing.T) *HarnessTest {
return st
}
-// shutdownNonStandbyNodes will shutdown any non-standby nodes.
-func (h *HarnessTest) shutdownNonStandbyNodes() {
- h.shutdownNodes(true)
-}
-
// shutdownAllNodes will shutdown all running nodes.
func (h *HarnessTest) shutdownAllNodes() {
- h.shutdownNodes(false)
-}
-
-// shutdownNodes will shutdown any non-standby nodes. If skipStandby is false,
-// all the standby nodes will be shutdown too.
-func (h *HarnessTest) shutdownNodes(skipStandby bool) {
- for nid, node := range h.manager.activeNodes {
- // If it's a standby node, skip.
- _, ok := h.manager.standbyNodes[nid]
- if ok && skipStandby {
- continue
- }
-
+ for _, node := range h.manager.activeNodes {
// The process may not be in a state to always shutdown
// immediately, so we'll retry up to a hard limit to ensure we
// eventually shutdown.
@@ -559,26 +422,14 @@ func (h *HarnessTest) cleanupStandbyNode(hn *node.HarnessNode) {
func (h *HarnessTest) removeConnectionns(hn *node.HarnessNode) {
resp := hn.RPC.ListPeers()
for _, peer := range resp.Peers {
- // Skip disconnecting Alice and Bob.
- switch peer.PubKey {
- case h.Alice.PubKeyStr:
- continue
- case h.Bob.PubKeyStr:
- continue
- }
-
hn.RPC.DisconnectPeer(peer.PubKey)
}
}
// SetTestName set the test case name.
func (h *HarnessTest) SetTestName(name string) {
- h.manager.currentTestCase = name
-
- // Overwrite the old log filename so we can create new log files.
- for _, node := range h.manager.standbyNodes {
- node.Cfg.LogFilenamePrefix = name
- }
+ cleanTestCaseName := strings.ReplaceAll(name, " ", "_")
+ h.manager.currentTestCase = cleanTestCaseName
}
// NewNode creates a new node and asserts its creation. The node is guaranteed
@@ -596,6 +447,42 @@ func (h *HarnessTest) NewNode(name string,
return node
}
+// NewNodeWithCoins creates a new node and asserts its creation. The node is
+// guaranteed to have finished its initialization and all its subservers are
+// started. In addition, 5 UTXO of 1 BTC each are sent to the node.
+func (h *HarnessTest) NewNodeWithCoins(name string,
+ extraArgs []string) *node.HarnessNode {
+
+ node, err := h.manager.newNode(h.T, name, extraArgs, nil, false)
+ require.NoErrorf(h, err, "unable to create new node for %s", name)
+
+ // Start the node.
+ err = node.Start(h.runCtx)
+ require.NoError(h, err, "failed to start node %s", node.Name())
+
+ // Load up the wallets of the node with 5 outputs of 1 BTC each.
+ const (
+ numOutputs = 5
+ fundAmount = 1 * btcutil.SatoshiPerBitcoin
+ totalAmount = fundAmount * numOutputs
+ )
+
+ for i := 0; i < numOutputs; i++ {
+ h.createAndSendOutput(
+ node, fundAmount,
+ lnrpc.AddressType_WITNESS_PUBKEY_HASH,
+ )
+ }
+
+ // Mine a block to confirm the transactions.
+ h.MineBlocksAndAssertNumTxes(1, numOutputs)
+
+ // Now block until the wallet have fully synced up.
+ h.WaitForBalanceConfirmed(node, totalAmount)
+
+ return node
+}
+
// Shutdown shuts down the given node and asserts that no errors occur.
func (h *HarnessTest) Shutdown(node *node.HarnessNode) {
// The process may not be in a state to always shutdown immediately, so
@@ -1464,7 +1351,7 @@ func (h *HarnessTest) fundCoins(amt btcutil.Amount, target *node.HarnessNode,
}
// FundCoins attempts to send amt satoshis from the internal mining node to the
-// targeted lightning node using a P2WKH address. 2 blocks are mined after in
+// targeted lightning node using a P2WKH address. 1 blocks are mined after in
// order to confirm the transaction.
func (h *HarnessTest) FundCoins(amt btcutil.Amount, hn *node.HarnessNode) {
h.fundCoins(amt, hn, lnrpc.AddressType_WITNESS_PUBKEY_HASH, true)
@@ -1653,6 +1540,22 @@ func (h *HarnessTest) CleanupForceClose(hn *node.HarnessNode) {
// Wait for the channel to be marked pending force close.
h.AssertNumPendingForceClose(hn, 1)
+ // Mine enough blocks for the node to sweep its funds from the force
+ // closed channel. The commit sweep resolver is offers the input to the
+ // sweeper when it's force closed, and broadcast the sweep tx at
+ // defaulCSV-1.
+ //
+ // NOTE: we might empty blocks here as we don't know the exact number
+ // of blocks to mine. This may end up mining more blocks than needed.
+ h.MineEmptyBlocks(node.DefaultCSV - 1)
+
+ // Assert there is one pending sweep.
+ h.AssertNumPendingSweeps(hn, 1)
+
+ // The node should now sweep the funds, clean up by mining the sweeping
+ // tx.
+ h.MineBlocksAndAssertNumTxes(1, 1)
+
// Mine blocks to get any second level HTLC resolved. If there are no
// HTLCs, this will behave like h.AssertNumPendingCloseChannels.
h.mineTillForceCloseResolved(hn)
@@ -1724,9 +1627,9 @@ func (h *HarnessTest) RestartNodeAndRestoreDB(hn *node.HarnessNode) {
// closures as the caller doesn't need to mine all the blocks to make sure the
// mempool is empty.
func (h *HarnessTest) CleanShutDown() {
- // First, shutdown all non-standby nodes to prevent new transactions
- // being created and fed into the mempool.
- h.shutdownNonStandbyNodes()
+ // First, shutdown all nodes to prevent new transactions being created
+ // and fed into the mempool.
+ h.shutdownAllNodes()
// Now mine blocks till the mempool is empty.
h.cleanMempool()
@@ -1994,7 +1897,8 @@ func (h *HarnessTest) AssertSweepFound(hn *node.HarnessNode,
return nil
}
- return fmt.Errorf("sweep tx %v not found", sweep)
+ return fmt.Errorf("sweep tx %v not found in resp %v", sweep,
+ sweepResp)
}, wait.DefaultTimeout)
require.NoError(h, err, "%s: timeout checking sweep tx", hn.Name())
}
@@ -2264,6 +2168,10 @@ func acceptChannel(t *testing.T, zeroConf bool, stream rpc.AcceptorClient) {
require.NoError(t, err)
}
+var nodeNames = []string{
+ "Alice", "Bob", "Carol", "Dave", "Eve", "Frank", "Grace", "Heidi",
+}
+
// createNodes creates the number of nodes specified by the number of configs.
// Each node is created using the specified config, the neighbors are
// connected.
@@ -2276,7 +2184,7 @@ func (h *HarnessTest) createNodes(nodeCfgs [][]string) []*node.HarnessNode {
// Create new nodes.
for i, nodeCfg := range nodeCfgs {
- nodeName := fmt.Sprintf("Node%q", string(rune('A'+i)))
+ nodeName := nodeNames[i]
n := h.NewNode(nodeName, nodeCfg)
nodes[i] = n
}
@@ -2308,12 +2216,39 @@ func (h *HarnessTest) openChannelsForNodes(nodes []*node.HarnessNode,
// Sanity check the params.
require.Greater(h, len(nodes), 1, "need at least 2 nodes")
+ // attachFundingShim is a helper closure that optionally attaches a
+ // funding shim to the open channel params and returns it.
+ attachFundingShim := func(
+ nodeA, nodeB *node.HarnessNode) OpenChannelParams {
+
+ // If this channel is not a script enforced lease channel,
+ // we'll do nothing and return the params.
+ leasedType := lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE
+ if p.CommitmentType != leasedType {
+ return p
+ }
+
+ // Otherwise derive the funding shim, attach it to the original
+ // open channel params and return it.
+ minerHeight := h.CurrentHeight()
+ thawHeight := minerHeight + thawHeightDelta
+ fundingShim, _ := h.deriveFundingShim(
+ nodeA, nodeB, p.Amt, thawHeight, true, leasedType,
+ )
+
+ p.FundingShim = fundingShim
+ return p
+ }
+
// Open channels in batch to save blocks mined.
reqs := make([]*OpenChannelRequest, 0, len(nodes)-1)
for i := 0; i < len(nodes)-1; i++ {
nodeA := nodes[i]
nodeB := nodes[i+1]
+ // Optionally attach a funding shim to the open channel params.
+ p = attachFundingShim(nodeA, nodeB)
+
req := &OpenChannelRequest{
Local: nodeA,
Remote: nodeB,
@@ -2364,3 +2299,119 @@ func (h *HarnessTest) openZeroConfChannelsForNodes(nodes []*node.HarnessNode,
return resp
}
+
+// deriveFundingShim creates a channel funding shim by deriving the necessary
+// keys on both sides.
+func (h *HarnessTest) deriveFundingShim(alice, bob *node.HarnessNode,
+ chanSize btcutil.Amount, thawHeight uint32, publish bool,
+ commitType lnrpc.CommitmentType) (*lnrpc.FundingShim,
+ *lnrpc.ChannelPoint) {
+
+ keyLoc := &signrpc.KeyLocator{KeyFamily: 9999}
+ carolFundingKey := alice.RPC.DeriveKey(keyLoc)
+ daveFundingKey := bob.RPC.DeriveKey(keyLoc)
+
+ // Now that we have the multi-sig keys for each party, we can manually
+ // construct the funding transaction. We'll instruct the backend to
+ // immediately create and broadcast a transaction paying out an exact
+ // amount. Normally this would reside in the mempool, but we just
+ // confirm it now for simplicity.
+ var (
+ fundingOutput *wire.TxOut
+ musig2 bool
+ err error
+ )
+ if commitType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
+ var carolKey, daveKey *btcec.PublicKey
+ carolKey, err = btcec.ParsePubKey(carolFundingKey.RawKeyBytes)
+ require.NoError(h, err)
+ daveKey, err = btcec.ParsePubKey(daveFundingKey.RawKeyBytes)
+ require.NoError(h, err)
+
+ _, fundingOutput, err = input.GenTaprootFundingScript(
+ carolKey, daveKey, int64(chanSize),
+ fn.None[chainhash.Hash](),
+ )
+ require.NoError(h, err)
+
+ musig2 = true
+ } else {
+ _, fundingOutput, err = input.GenFundingPkScript(
+ carolFundingKey.RawKeyBytes, daveFundingKey.RawKeyBytes,
+ int64(chanSize),
+ )
+ require.NoError(h, err)
+ }
+
+ var txid *chainhash.Hash
+ targetOutputs := []*wire.TxOut{fundingOutput}
+ if publish {
+ txid = h.SendOutputsWithoutChange(targetOutputs, 5)
+ } else {
+ tx := h.CreateTransaction(targetOutputs, 5)
+
+ txHash := tx.TxHash()
+ txid = &txHash
+ }
+
+ // At this point, we can being our external channel funding workflow.
+ // We'll start by generating a pending channel ID externally that will
+ // be used to track this new funding type.
+ pendingChanID := h.Random32Bytes()
+
+ // Now that we have the pending channel ID, Dave (our responder) will
+ // register the intent to receive a new channel funding workflow using
+ // the pending channel ID.
+ chanPoint := &lnrpc.ChannelPoint{
+ FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
+ FundingTxidBytes: txid[:],
+ },
+ }
+ chanPointShim := &lnrpc.ChanPointShim{
+ Amt: int64(chanSize),
+ ChanPoint: chanPoint,
+ LocalKey: &lnrpc.KeyDescriptor{
+ RawKeyBytes: daveFundingKey.RawKeyBytes,
+ KeyLoc: &lnrpc.KeyLocator{
+ KeyFamily: daveFundingKey.KeyLoc.KeyFamily,
+ KeyIndex: daveFundingKey.KeyLoc.KeyIndex,
+ },
+ },
+ RemoteKey: carolFundingKey.RawKeyBytes,
+ PendingChanId: pendingChanID,
+ ThawHeight: thawHeight,
+ Musig2: musig2,
+ }
+ fundingShim := &lnrpc.FundingShim{
+ Shim: &lnrpc.FundingShim_ChanPointShim{
+ ChanPointShim: chanPointShim,
+ },
+ }
+ bob.RPC.FundingStateStep(&lnrpc.FundingTransitionMsg{
+ Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
+ ShimRegister: fundingShim,
+ },
+ })
+
+ // If we attempt to register the same shim (has the same pending chan
+ // ID), then we should get an error.
+ bob.RPC.FundingStateStepAssertErr(&lnrpc.FundingTransitionMsg{
+ Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
+ ShimRegister: fundingShim,
+ },
+ })
+
+ // We'll take the chan point shim we just registered for Dave (the
+ // responder), and swap the local/remote keys before we feed it in as
+ // Carol's funding shim as the initiator.
+ fundingShim.GetChanPointShim().LocalKey = &lnrpc.KeyDescriptor{
+ RawKeyBytes: carolFundingKey.RawKeyBytes,
+ KeyLoc: &lnrpc.KeyLocator{
+ KeyFamily: carolFundingKey.KeyLoc.KeyFamily,
+ KeyIndex: carolFundingKey.KeyLoc.KeyIndex,
+ },
+ }
+ fundingShim.GetChanPointShim().RemoteKey = daveFundingKey.RawKeyBytes
+
+ return fundingShim, chanPoint
+}
diff --git a/lntest/harness_assertion.go b/lntest/harness_assertion.go
index b487e7e32a..c684941f73 100644
--- a/lntest/harness_assertion.go
+++ b/lntest/harness_assertion.go
@@ -19,7 +19,6 @@ import (
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/channeldb"
- "github.com/lightningnetwork/lnd/fn"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
@@ -29,6 +28,7 @@ import (
"github.com/lightningnetwork/lnd/lntest/rpc"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntypes"
+ "github.com/lightningnetwork/lnd/lnutils"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
)
@@ -240,59 +240,6 @@ func (h *HarnessTest) EnsureConnected(a, b *node.HarnessNode) {
h.AssertPeerConnected(b, a)
}
-// AssertNumActiveEdges checks that an expected number of active edges can be
-// found in the node specified.
-func (h *HarnessTest) AssertNumActiveEdges(hn *node.HarnessNode,
- expected int, includeUnannounced bool) []*lnrpc.ChannelEdge {
-
- var edges []*lnrpc.ChannelEdge
-
- old := hn.State.Edge.Public
- if includeUnannounced {
- old = hn.State.Edge.Total
- }
-
- // filterDisabled is a helper closure that filters out disabled
- // channels.
- filterDisabled := func(edge *lnrpc.ChannelEdge) bool {
- if edge.Node1Policy.Disabled {
- return false
- }
- if edge.Node2Policy.Disabled {
- return false
- }
-
- return true
- }
-
- err := wait.NoError(func() error {
- req := &lnrpc.ChannelGraphRequest{
- IncludeUnannounced: includeUnannounced,
- }
- resp := hn.RPC.DescribeGraph(req)
- activeEdges := fn.Filter(filterDisabled, resp.Edges)
- total := len(activeEdges)
-
- if total-old == expected {
- if expected != 0 {
- // NOTE: assume edges come in ascending order
- // that the old edges are at the front of the
- // slice.
- edges = activeEdges[old:]
- }
-
- return nil
- }
-
- return errNumNotMatched(hn.Name(), "num of channel edges",
- expected, total-old, total, old)
- }, DefaultTimeout)
-
- require.NoError(h, err, "timeout while checking for edges")
-
- return edges
-}
-
// AssertNumEdges checks that an expected number of edges can be found in the
// node specified.
func (h *HarnessTest) AssertNumEdges(hn *node.HarnessNode,
@@ -738,15 +685,19 @@ func (h *HarnessTest) AssertStreamChannelForceClosed(hn *node.HarnessNode,
channeldb.ChanStatusLocalCloseInitiator.String(),
"channel not coop broadcasted")
+ // Get the closing txid.
+ closeTxid, err := chainhash.NewHashFromStr(resp.ClosingTxid)
+ require.NoError(h, err)
+
// We'll now, generate a single block, wait for the final close status
// update, then ensure that the closing transaction was included in the
// block.
- block := h.MineBlocksAndAssertNumTxes(1, 1)[0]
+ closeTx := h.AssertTxInMempool(*closeTxid)
+ h.MineBlockWithTx(closeTx)
// Consume one close event and assert the closing txid can be found in
// the block.
closingTxid := h.WaitForChannelCloseEvent(stream)
- h.AssertTxInBlock(block, closingTxid)
// We should see zero waiting close channels and 1 pending force close
// channels now.
@@ -1309,58 +1260,6 @@ func (h *HarnessTest) AssertNumActiveHtlcs(hn *node.HarnessNode, num int) {
hn.Name())
}
-// AssertActiveHtlcs makes sure the node has the _exact_ HTLCs matching
-// payHashes on _all_ their channels.
-func (h *HarnessTest) AssertActiveHtlcs(hn *node.HarnessNode,
- payHashes ...[]byte) {
-
- err := wait.NoError(func() error {
- // We require the RPC call to be succeeded and won't wait for
- // it as it's an unexpected behavior.
- req := &lnrpc.ListChannelsRequest{}
- nodeChans := hn.RPC.ListChannels(req)
-
- for _, ch := range nodeChans.Channels {
- // Record all payment hashes active for this channel.
- htlcHashes := make(map[string]struct{})
-
- for _, htlc := range ch.PendingHtlcs {
- h := hex.EncodeToString(htlc.HashLock)
- _, ok := htlcHashes[h]
- if ok {
- return fmt.Errorf("duplicate HashLock "+
- "in PendingHtlcs: %v",
- ch.PendingHtlcs)
- }
- htlcHashes[h] = struct{}{}
- }
-
- // Channel should have exactly the payHashes active.
- if len(payHashes) != len(htlcHashes) {
- return fmt.Errorf("node [%s:%x] had %v "+
- "htlcs active, expected %v",
- hn.Name(), hn.PubKey[:],
- len(htlcHashes), len(payHashes))
- }
-
- // Make sure all the payHashes are active.
- for _, payHash := range payHashes {
- h := hex.EncodeToString(payHash)
- if _, ok := htlcHashes[h]; ok {
- continue
- }
-
- return fmt.Errorf("node [%s:%x] didn't have: "+
- "the payHash %v active", hn.Name(),
- hn.PubKey[:], h)
- }
- }
-
- return nil
- }, DefaultTimeout)
- require.NoError(h, err, "timeout checking active HTLCs")
-}
-
// AssertIncomingHTLCActive asserts the node has a pending incoming HTLC in the
// given channel. Returns the HTLC if found and active.
func (h *HarnessTest) AssertIncomingHTLCActive(hn *node.HarnessNode,
@@ -1609,8 +1508,9 @@ func (h *HarnessTest) AssertNumHTLCsAndStage(hn *node.HarnessNode,
}
if len(target.PendingHtlcs) != num {
- return fmt.Errorf("got %d pending htlcs, want %d",
- len(target.PendingHtlcs), num)
+ return fmt.Errorf("got %d pending htlcs, want %d, %s",
+ len(target.PendingHtlcs), num,
+ lnutils.SpewLogClosure(target.PendingHtlcs)())
}
for i, htlc := range target.PendingHtlcs {
@@ -2786,3 +2686,36 @@ func (h *HarnessTest) FindSweepingTxns(txns []*wire.MsgTx,
return sweepTxns
}
+
+// AssertForceCloseAndAnchorTxnsInMempool asserts that the force close and
+// anchor sweep txns are found in the mempool and returns the force close tx
+// and the anchor sweep tx.
+func (h *HarnessTest) AssertForceCloseAndAnchorTxnsInMempool() (*wire.MsgTx,
+ *wire.MsgTx) {
+
+ // Assert there are two txns in the mempool.
+ txns := h.GetNumTxsFromMempool(2)
+
+ // Assume the first is the force close tx.
+ forceCloseTx, anchorSweepTx := txns[0], txns[1]
+
+ // Get the txid.
+ closeTxid := forceCloseTx.TxHash()
+
+ // We now check whether there is an anchor input used in the assumed
+ // anchorSweepTx by checking every input's previous outpoint against
+ // the assumed closingTxid. If we fail to find one, it means the first
+ // item from the above txns is the anchor sweeping tx.
+ for _, inp := range anchorSweepTx.TxIn {
+ if inp.PreviousOutPoint.Hash == closeTxid {
+ // Found a match, this is indeed the anchor sweeping tx
+ // so we return it here.
+ return forceCloseTx, anchorSweepTx
+ }
+ }
+
+ // The assumed order is incorrect so we swap and return.
+ forceCloseTx, anchorSweepTx = anchorSweepTx, forceCloseTx
+
+ return forceCloseTx, anchorSweepTx
+}
diff --git a/lntest/harness_miner.go b/lntest/harness_miner.go
index 22b2b95acc..17fd864ed7 100644
--- a/lntest/harness_miner.go
+++ b/lntest/harness_miner.go
@@ -196,7 +196,8 @@ func (h *HarnessTest) mineTillForceCloseResolved(hn *node.HarnessNode) {
return nil
}, DefaultTimeout)
- require.NoErrorf(h, err, "assert force close resolved timeout")
+ require.NoErrorf(h, err, "%s: assert force close resolved timeout",
+ hn.Name())
}
// AssertTxInMempool asserts a given transaction can be found in the mempool.
diff --git a/lntest/harness_node_manager.go b/lntest/harness_node_manager.go
index 6ad4b90318..2040acec7f 100644
--- a/lntest/harness_node_manager.go
+++ b/lntest/harness_node_manager.go
@@ -40,10 +40,6 @@ type nodeManager struct {
// {pubkey: *HarnessNode}.
activeNodes map[uint32]*node.HarnessNode
- // standbyNodes is a map of all the standby nodes, format:
- // {pubkey: *HarnessNode}.
- standbyNodes map[uint32]*node.HarnessNode
-
// nodeCounter is a monotonically increasing counter that's used as the
// node's unique ID.
nodeCounter atomic.Uint32
@@ -57,11 +53,10 @@ func newNodeManager(lndBinary string, dbBackend node.DatabaseBackend,
nativeSQL bool) *nodeManager {
return &nodeManager{
- lndBinary: lndBinary,
- dbBackend: dbBackend,
- nativeSQL: nativeSQL,
- activeNodes: make(map[uint32]*node.HarnessNode),
- standbyNodes: make(map[uint32]*node.HarnessNode),
+ lndBinary: lndBinary,
+ dbBackend: dbBackend,
+ nativeSQL: nativeSQL,
+ activeNodes: make(map[uint32]*node.HarnessNode),
}
}
diff --git a/lntest/node/config.go b/lntest/node/config.go
index 1e8129e7d8..4fccbf6ab5 100644
--- a/lntest/node/config.go
+++ b/lntest/node/config.go
@@ -41,6 +41,40 @@ var (
btcdExecutable = flag.String(
"btcdexec", "", "full path to btcd binary",
)
+
+ // CfgLegacy specifies the config used to create a node that uses the
+ // legacy channel format.
+ CfgLegacy = []string{"--protocol.legacy.committweak"}
+
+ // CfgStaticRemoteKey specifies the config used to create a node that
+ // uses the static remote key feature.
+ CfgStaticRemoteKey = []string{}
+
+ // CfgAnchor specifies the config used to create a node that uses the
+ // anchor output feature.
+ CfgAnchor = []string{"--protocol.anchors"}
+
+ // CfgLeased specifies the config used to create a node that uses the
+ // leased channel feature.
+ CfgLeased = []string{
+ "--protocol.anchors",
+ "--protocol.script-enforced-lease",
+ }
+
+ // CfgSimpleTaproot specifies the config used to create a node that
+ // uses the simple taproot feature.
+ CfgSimpleTaproot = []string{
+ "--protocol.anchors",
+ "--protocol.simple-taproot-chans",
+ }
+
+ // CfgZeroConf specifies the config used to create a node that uses the
+ // zero-conf channel feature.
+ CfgZeroConf = []string{
+ "--protocol.anchors",
+ "--protocol.option-scid-alias",
+ "--protocol.zero-conf",
+ }
)
type DatabaseBackend int
@@ -199,7 +233,7 @@ func (cfg *BaseNodeConfig) GenArgs() []string {
nodeArgs := []string{
"--nobootstrap",
- "--debuglevel=debug",
+ "--debuglevel=debug,CRTR=trace",
"--bitcoin.defaultchanconfs=1",
"--accept-keysend",
"--keep-failed-payment-attempts",
diff --git a/lntest/node/harness_node.go b/lntest/node/harness_node.go
index 9ef1f212c0..d1353c25cf 100644
--- a/lntest/node/harness_node.go
+++ b/lntest/node/harness_node.go
@@ -983,6 +983,67 @@ func finalizeLogfile(hn *HarnessNode) {
getFinalizedLogFilePrefix(hn),
)
renameFile(hn.filename, newFileName)
+
+ // Assert the node has shut down from the log file.
+ err := assertNodeShutdown(newFileName)
+ if err != nil {
+ err := fmt.Errorf("[%s]: assert shutdown failed in log[%s]: %w",
+ hn.Name(), newFileName, err)
+ panic(err)
+ }
+}
+
+// assertNodeShutdown asserts that the node has shut down properly by checking
+// the last lines of the log file for the shutdown message "Shutdown complete".
+func assertNodeShutdown(filename string) error {
+ file, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ // Read more than one line to make sure we get the last line.
+ // const linesSize = 200
+ //
+ // NOTE: Reading 200 bytes of lines should be more than enough to find
+ // the `Shutdown complete` message. However, this is only true if the
+ // message is printed the last, which means `lnd` will properly wait
+ // for all its subsystems to shut down before exiting. Unfortunately
+ // there is at least one bug in the shutdown process where we don't
+ // wait for the chain backend to fully quit first, which can be easily
+ // reproduced by turning on `RPCC=trace` and use a linesSize of 200.
+ //
+ // TODO(yy): fix the shutdown process and remove this workaround by
+ // refactoring the lnd to use only one rpcclient, which requires quite
+ // some work on the btcwallet front.
+ const linesSize = 1000
+
+ buf := make([]byte, linesSize)
+ stat, statErr := file.Stat()
+ if statErr != nil {
+ return err
+ }
+
+ start := stat.Size() - linesSize
+ _, err = file.ReadAt(buf, start)
+ if err != nil {
+ return err
+ }
+
+ // Exit early if the shutdown line is found.
+ if bytes.Contains(buf, []byte("Shutdown complete")) {
+ return nil
+ }
+
+ // For etcd tests, we need to check for the line where the node is
+ // blocked at wallet unlock since we are testing how such a behavior is
+ // handled by etcd.
+ if bytes.Contains(buf, []byte("wallet and unlock")) {
+ return nil
+ }
+
+ return fmt.Errorf("node did not shut down properly: found log "+
+ "lines: %s", buf)
}
// finalizeEtcdLog saves the etcd log files when test ends.
diff --git a/lntest/node/state.go b/lntest/node/state.go
index 6b94462402..a89ab7d2cc 100644
--- a/lntest/node/state.go
+++ b/lntest/node/state.go
@@ -312,10 +312,10 @@ func (s *State) updateEdgeStats() {
// filterDisabled is a helper closure that filters out disabled
// channels.
filterDisabled := func(edge *lnrpc.ChannelEdge) bool {
- if edge.Node1Policy.Disabled {
+ if edge.Node1Policy != nil && edge.Node1Policy.Disabled {
return false
}
- if edge.Node2Policy.Disabled {
+ if edge.Node2Policy != nil && edge.Node2Policy.Disabled {
return false
}
diff --git a/lntest/wait/timeouts_darwin.go b/lntest/wait/timeouts_darwin.go
index f992d06b22..42b1d0dc6c 100644
--- a/lntest/wait/timeouts_darwin.go
+++ b/lntest/wait/timeouts_darwin.go
@@ -29,7 +29,21 @@ const (
// NodeStartTimeout is the timeout value when waiting for a node to
// become fully started.
- NodeStartTimeout = time.Minute * 2
+ //
+ // TODO(yy): There is an optimization we can do to increase the time it
+ // takes to finish the initial wallet sync. Instead of finding the
+ // block birthday using binary search in btcwallet, we can instead
+ // search optimistically by looking at the chain tip minus X blocks to
+ // get the birthday block. This way in the test the node won't attempt
+ // to sync from the beginning of the chain, which is always the case
+ // due to how regtest blocks are mined.
+ // The other direction of optimization is to change the precision of
+ // the regtest block's median time. By consensus, we need to increase
+ // at least one second(?), this means in regtest when large amount of
+ // blocks are mined in a short time, the block time is actually in the
+ // future. We could instead allow the median time to increase by
+ // microseconds for itests.
+ NodeStartTimeout = time.Minute * 3
// SqliteBusyTimeout is the maximum time that a call to the sqlite db
// will wait for the connection to become available.
diff --git a/lntest/wait/timeouts_remote_db.go b/lntest/wait/timeouts_remote_db.go
index 43cd6e022b..ae7043ff0e 100644
--- a/lntest/wait/timeouts_remote_db.go
+++ b/lntest/wait/timeouts_remote_db.go
@@ -29,7 +29,7 @@ const (
// AsyncBenchmarkTimeout is the timeout used when running the async
// payments benchmark.
- AsyncBenchmarkTimeout = time.Minute*2 + extraTimeout
+ AsyncBenchmarkTimeout = time.Minute*5 + extraTimeout
// NodeStartTimeout is the timeout value when waiting for a node to
// become fully started.
diff --git a/lnutils/log.go b/lnutils/log.go
index a32738bdf4..128bc6fc83 100644
--- a/lnutils/log.go
+++ b/lnutils/log.go
@@ -1,6 +1,10 @@
package lnutils
-import "github.com/davecgh/go-spew/spew"
+import (
+ "strings"
+
+ "github.com/davecgh/go-spew/spew"
+)
// LogClosure is used to provide a closure over expensive logging operations so
// don't have to be performed when the logging level doesn't warrant it.
@@ -25,3 +29,10 @@ func SpewLogClosure(a any) LogClosure {
return spew.Sdump(a)
}
}
+
+// NewSeparatorClosure return a new closure that logs a separator line.
+func NewSeparatorClosure() LogClosure {
+ return func() string {
+ return strings.Repeat("=", 80)
+ }
+}
diff --git a/lnwallet/channel.go b/lnwallet/channel.go
index e7ec607727..96b3b75aa6 100644
--- a/lnwallet/channel.go
+++ b/lnwallet/channel.go
@@ -1711,7 +1711,7 @@ func (lc *LightningChannel) restorePendingRemoteUpdates(
localCommitmentHeight uint64,
pendingRemoteCommit *commitment) error {
- lc.log.Debugf("Restoring %v dangling remote updates",
+ lc.log.Debugf("Restoring %v dangling remote updates pending our sig",
len(unsignedAckedUpdates))
for _, logUpdate := range unsignedAckedUpdates {
@@ -1830,6 +1830,9 @@ func (lc *LightningChannel) restorePendingLocalUpdates(
pendingCommit := pendingRemoteCommitDiff.Commitment
pendingHeight := pendingCommit.CommitHeight
+ lc.log.Debugf("Restoring pending remote commitment %v at commit "+
+ "height %v", pendingCommit.CommitTx.TxHash(), pendingHeight)
+
auxResult, err := fn.MapOptionZ(
lc.leafStore,
func(s AuxLeafStore) fn.Result[CommitDiffAuxResult] {
diff --git a/lnwallet/wallet.go b/lnwallet/wallet.go
index a9018437d5..a236894e09 100644
--- a/lnwallet/wallet.go
+++ b/lnwallet/wallet.go
@@ -733,7 +733,7 @@ func (l *LightningWallet) RegisterFundingIntent(expectedID [32]byte,
}
if _, ok := l.fundingIntents[expectedID]; ok {
- return fmt.Errorf("%w: already has intent registered: %v",
+ return fmt.Errorf("%w: already has intent registered: %x",
ErrDuplicatePendingChanID, expectedID[:])
}
diff --git a/log.go b/log.go
index c88208ef3b..89047b2eed 100644
--- a/log.go
+++ b/log.go
@@ -9,6 +9,7 @@ import (
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/autopilot"
"github.com/lightningnetwork/lnd/build"
+ "github.com/lightningnetwork/lnd/chainio"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/chainreg"
"github.com/lightningnetwork/lnd/chanacceptor"
@@ -192,6 +193,7 @@ func SetupLoggers(root *build.SubLoggerManager, interceptor signal.Interceptor)
AddSubLogger(
root, blindedpath.Subsystem, interceptor, blindedpath.UseLogger,
)
+ AddSubLogger(root, chainio.Subsystem, interceptor, chainio.UseLogger)
}
// AddSubLogger is a helper method to conveniently create and register the
diff --git a/make/testing_flags.mk b/make/testing_flags.mk
index b2db6861c3..ba5b3fb378 100644
--- a/make/testing_flags.mk
+++ b/make/testing_flags.mk
@@ -10,6 +10,7 @@ COVER_PKG = $$(go list -deps -tags="$(DEV_TAGS)" ./... | grep '$(PKG)' | grep -v
NUM_ITEST_TRANCHES = 4
ITEST_PARALLELISM = $(NUM_ITEST_TRANCHES)
POSTGRES_START_DELAY = 5
+SHUFFLE_SEED = 0
# If rpc option is set also add all extra RPC tags to DEV_TAGS
ifneq ($(with-rpc),)
@@ -27,6 +28,11 @@ ifneq ($(parallel),)
ITEST_PARALLELISM = $(parallel)
endif
+# Set the seed for shuffling the test cases.
+ifneq ($(shuffleseed),)
+SHUFFLE_SEED = $(shuffleseed)
+endif
+
# Windows needs to append a .exe suffix to all executable files, otherwise it
# won't run them.
ifneq ($(windows),)
diff --git a/peer/brontide.go b/peer/brontide.go
index ae4d373cac..50d5111016 100644
--- a/peer/brontide.go
+++ b/peer/brontide.go
@@ -1467,10 +1467,18 @@ func (p *Brontide) Disconnect(reason error) {
// Make sure initialization has completed before we try to tear things
// down.
- select {
- case <-p.startReady:
- case <-p.quit:
- return
+ //
+ // NOTE: We only read the `startReady` chan if the peer has been
+ // started, otherwise we will skip reading it as this chan won't be
+ // closed, hence blocks forever.
+ if atomic.LoadInt32(&p.started) == 1 {
+ p.log.Debugf("Started, waiting on startReady signal")
+
+ select {
+ case <-p.startReady:
+ case <-p.quit:
+ return
+ }
}
err := fmt.Errorf("disconnecting %s, reason: %v", p, reason)
diff --git a/protofsm/daemon_events.go b/protofsm/daemon_events.go
new file mode 100644
index 0000000000..e5de0b6951
--- /dev/null
+++ b/protofsm/daemon_events.go
@@ -0,0 +1,122 @@
+package protofsm
+
+import (
+ "github.com/btcsuite/btcd/btcec/v2"
+ "github.com/btcsuite/btcd/chaincfg/chainhash"
+ "github.com/btcsuite/btcd/wire"
+ "github.com/lightningnetwork/lnd/chainntnfs"
+ "github.com/lightningnetwork/lnd/fn"
+ "github.com/lightningnetwork/lnd/lnwire"
+)
+
+// DaemonEvent is a special event that can be emitted by a state transition
+// function. A state machine can use this to perform side effects, such as
+// sending a message to a peer, or broadcasting a transaction.
+type DaemonEvent interface {
+ daemonSealed()
+}
+
+// DaemonEventSet is a set of daemon events that can be emitted by a state
+// transition.
+type DaemonEventSet []DaemonEvent
+
+// DaemonEvents is a special type constraint that enumerates all the possible
+// types of daemon events.
+type DaemonEvents interface {
+ SendMsgEvent[any] | BroadcastTxn | RegisterSpend[any] |
+ RegisterConf[any]
+}
+
+// SendPredicate is a function that returns true if the target message should
+// sent.
+type SendPredicate = func() bool
+
+// SendMsgEvent is a special event that can be emitted by a state transition
+// that instructs the daemon to send the contained message to the target peer.
+type SendMsgEvent[Event any] struct {
+ // TargetPeer is the peer to send the message to.
+ TargetPeer btcec.PublicKey
+
+ // Msgs is the set of messages to send to the target peer.
+ Msgs []lnwire.Message
+
+ // SendWhen implements a system for a conditional send once a special
+ // send predicate has been met.
+ //
+ // TODO(roasbeef): contrast with usage of OnCommitFlush, etc
+ SendWhen fn.Option[SendPredicate]
+
+ // PostSendEvent is an optional event that is to be emitted after the
+ // message has been sent. If a SendWhen is specified, then this will
+ // only be executed after that returns true to unblock the send.
+ PostSendEvent fn.Option[Event]
+}
+
+// daemonSealed indicates that this struct is a DaemonEvent instance.
+func (s *SendMsgEvent[E]) daemonSealed() {}
+
+// BroadcastTxn indicates the target transaction should be broadcast to the
+// network.
+type BroadcastTxn struct {
+ // Tx is the transaction to broadcast.
+ Tx *wire.MsgTx
+
+ // Label is an optional label to attach to the transaction.
+ Label string
+}
+
+// daemonSealed indicates that this struct is a DaemonEvent instance.
+func (b *BroadcastTxn) daemonSealed() {}
+
+// SpendMapper is a function that's used to map a spend notification to a
+// custom state machine event.
+type SpendMapper[Event any] func(*chainntnfs.SpendDetail) Event
+
+// RegisterSpend is used to request that a certain event is sent into the state
+// machine once the specified outpoint has been spent.
+type RegisterSpend[Event any] struct {
+ // OutPoint is the outpoint on chain to watch.
+ OutPoint wire.OutPoint
+
+ // PkScript is the script that we expect to be spent along with the
+ // outpoint.
+ PkScript []byte
+
+ // HeightHint is a value used to give the chain scanner a hint on how
+ // far back it needs to start its search.
+ HeightHint uint32
+
+ // PostSpendEvent is a special spend mapper, that if present, will be
+ // used to map the protofsm spend event to a custom event.
+ PostSpendEvent fn.Option[SpendMapper[Event]]
+}
+
+// daemonSealed indicates that this struct is a DaemonEvent instance.
+func (r *RegisterSpend[E]) daemonSealed() {}
+
+// RegisterConf is used to request that a certain event is sent into the state
+// machien once the specified outpoint has been spent.
+type RegisterConf[Event any] struct {
+ // Txid is the txid of the txn we want to watch the chain for.
+ Txid chainhash.Hash
+
+ // PkScript is the script that we expect to be created along with the
+ // outpoint.
+ PkScript []byte
+
+ // HeightHint is a value used to give the chain scanner a hint on how
+ // far back it needs to start its search.
+ HeightHint uint32
+
+ // NumConfs is the number of confirmations that the spending
+ // transaction needs to dispatch an event.
+ NumConfs fn.Option[uint32]
+
+ // PostConfEvent is an event that's sent back to the requester once the
+ // transaction specified above has confirmed in the chain with
+ // sufficient depth.
+ PostConfEvent fn.Option[Event]
+}
+
+// daemonSealed indicates that this struct is a DaemonEvent instance.
+func (r *RegisterConf[E]) daemonSealed() {}
diff --git a/protofsm/log.go b/protofsm/log.go
new file mode 100644
index 0000000000..8ff9c1b62f
--- /dev/null
+++ b/protofsm/log.go
@@ -0,0 +1,29 @@
+package protofsm
+
+import (
+ "github.com/btcsuite/btclog"
+ "github.com/lightningnetwork/lnd/build"
+)
+
+// log is a logger that is initialized with no output filters. This
+// means the package will not perform any logging by default until the caller
+// requests it.
+var log btclog.Logger
+
+// The default amount of logging is none.
+func init() {
+ UseLogger(build.NewSubLogger("PFSM", nil))
+}
+
+// DisableLog disables all library log output. Logging output is disabled
+// by default until UseLogger is called.
+func DisableLog() {
+ UseLogger(btclog.Disabled)
+}
+
+// UseLogger uses a specified Logger to output package logging info.
+// This should be used in preference to SetLogWriter if the caller is also
+// using btclog.
+func UseLogger(logger btclog.Logger) {
+ log = logger
+}
diff --git a/protofsm/msg_mapper.go b/protofsm/msg_mapper.go
new file mode 100644
index 0000000000..b96d677e6b
--- /dev/null
+++ b/protofsm/msg_mapper.go
@@ -0,0 +1,15 @@
+package protofsm
+
+import (
+ "github.com/lightningnetwork/lnd/fn"
+ "github.com/lightningnetwork/lnd/lnwire"
+)
+
+// MsgMapper is used to map incoming wire messages into a FSM event. This is
+// useful to decouple the translation of an outside or wire message into an
+// event type that can be understood by the FSM.
+type MsgMapper[Event any] interface {
+ // MapMsg maps a wire message into a FSM event. If the message is not
+ // mappable, then an None is returned.
+ MapMsg(msg lnwire.Message) fn.Option[Event]
+}
diff --git a/protofsm/state_machine.go b/protofsm/state_machine.go
new file mode 100644
index 0000000000..ecbd748347
--- /dev/null
+++ b/protofsm/state_machine.go
@@ -0,0 +1,670 @@
+package protofsm
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/btcsuite/btcd/btcec/v2"
+ "github.com/btcsuite/btcd/chaincfg/chainhash"
+ "github.com/btcsuite/btcd/wire"
+ "github.com/lightningnetwork/lnd/chainntnfs"
+ "github.com/lightningnetwork/lnd/fn"
+ "github.com/lightningnetwork/lnd/lnutils"
+ "github.com/lightningnetwork/lnd/lnwire"
+)
+
+const (
+ // pollInterval is the interval at which we'll poll the SendWhen
+ // predicate if specified.
+ pollInterval = time.Millisecond * 100
+)
+
+// EmittedEvent is a special type that can be emitted by a state transition.
+// This can container internal events which are to be routed back to the state,
+// or external events which are to be sent to the daemon.
+type EmittedEvent[Event any] struct {
+ // InternalEvent is an optional internal event that is to be routed
+ // back to the target state. This enables state to trigger one or many
+ // state transitions without a new external event.
+ InternalEvent fn.Option[[]Event]
+
+ // ExternalEvent is an optional external event that is to be sent to
+ // the daemon for dispatch. Usually, this is some form of I/O.
+ ExternalEvents fn.Option[DaemonEventSet]
+}
+
+// StateTransition is a state transition type. It denotes the next state to go
+// to, and also the set of events to emit.
+type StateTransition[Event any, Env Environment] struct {
+ // NextState is the next state to transition to.
+ NextState State[Event, Env]
+
+ // NewEvents is the set of events to emit.
+ NewEvents fn.Option[EmittedEvent[Event]]
+}
+
+// Environment is an abstract interface that represents the environment that
+// the state machine will execute using. From the PoV of the main state machine
+// executor, we just care about being able to clean up any resources that were
+// allocated by the environment.
+type Environment interface {
+ // Name returns the name of the environment. This is used to uniquely
+ // identify the environment of related state machines.
+ Name() string
+}
+
+// State defines an abstract state along, namely its state transition function
+// that takes as input an event and an environment, and returns a state
+// transition (next state, and set of events to emit). As state can also either
+// be terminal, or not, a terminal event causes state execution to halt.
+type State[Event any, Env Environment] interface {
+ // ProcessEvent takes an event and an environment, and returns a new
+ // state transition. This will be iteratively called until either a
+ // terminal state is reached, or no further internal events are
+ // emitted.
+ ProcessEvent(event Event, env Env) (*StateTransition[Event, Env], error)
+
+ // IsTerminal returns true if this state is terminal, and false
+ // otherwise.
+ IsTerminal() bool
+
+ // TODO(roasbeef): also add state serialization?
+}
+
+// DaemonAdapters is a set of methods that server as adapters to bridge the
+// pure world of the FSM to the real world of the daemon. These will be used to
+// do things like broadcast transactions, or send messages to peers.
+type DaemonAdapters interface {
+ // SendMessages sends the target set of messages to the target peer.
+ SendMessages(btcec.PublicKey, []lnwire.Message) error
+
+ // BroadcastTransaction broadcasts a transaction with the target label.
+ BroadcastTransaction(*wire.MsgTx, string) error
+
+ // RegisterConfirmationsNtfn registers an intent to be notified once
+ // txid reaches numConfs confirmations. We also pass in the pkScript as
+ // the default light client instead needs to match on scripts created
+ // in the block. If a nil txid is passed in, then not only should we
+ // match on the script, but we should also dispatch once the
+ // transaction containing the script reaches numConfs confirmations.
+ // This can be useful in instances where we only know the script in
+ // advance, but not the transaction containing it.
+ //
+ // TODO(roasbeef): could abstract further?
+ RegisterConfirmationsNtfn(txid *chainhash.Hash, pkScript []byte,
+ numConfs, heightHint uint32,
+ opts ...chainntnfs.NotifierOption,
+ ) (*chainntnfs.ConfirmationEvent, error)
+
+ // RegisterSpendNtfn registers an intent to be notified once the target
+ // outpoint is successfully spent within a transaction. The script that
+ // the outpoint creates must also be specified. This allows this
+ // interface to be implemented by BIP 158-like filtering.
+ RegisterSpendNtfn(outpoint *wire.OutPoint, pkScript []byte,
+ heightHint uint32) (*chainntnfs.SpendEvent, error)
+}
+
+// stateQuery is used by outside callers to query the internal state of the
+// state machine.
+type stateQuery[Event any, Env Environment] struct {
+ // CurrentState is a channel that will be sent the current state of the
+ // state machine.
+ CurrentState chan State[Event, Env]
+}
+
+// StateMachine represents an abstract FSM that is able to process new incoming
+// events and drive a state machine to termination. This implementation uses
+// type params to abstract over the types of events and environment. Events
+// trigger new state transitions, that use the environment to perform some
+// action.
+//
+// TODO(roasbeef): terminal check, daemon event execution, init?
+type StateMachine[Event any, Env Environment] struct {
+ cfg StateMachineCfg[Event, Env]
+
+ // events is the channel that will be used to send new events to the
+ // FSM.
+ events chan Event
+
+ // newStateEvents is an EventDistributor that will be used to notify
+ // any relevant callers of new state transitions that occur.
+ newStateEvents *fn.EventDistributor[State[Event, Env]]
+
+ // stateQuery is a channel that will be used by outside callers to
+ // query the internal state machine state.
+ stateQuery chan stateQuery[Event, Env]
+
+ wg fn.GoroutineManager
+ quit chan struct{}
+
+ startOnce sync.Once
+ stopOnce sync.Once
+}
+
+// ErrorReporter is an interface that's used to report errors that occur during
+// state machine execution.
+type ErrorReporter interface {
+ // ReportError is a method that's used to report an error that occurred
+ // during state machine execution.
+ ReportError(err error)
+}
+
+// StateMachineCfg is a configuration struct that's used to create a new state
+// machine.
+type StateMachineCfg[Event any, Env Environment] struct {
+ // ErrorReporter is used to report errors that occur during state
+ // transitions.
+ ErrorReporter ErrorReporter
+
+ // Daemon is a set of adapters that will be used to bridge the FSM to
+ // the daemon.
+ Daemon DaemonAdapters
+
+ // InitialState is the initial state of the state machine.
+ InitialState State[Event, Env]
+
+ // Env is the environment that the state machine will use to execute.
+ Env Env
+
+ // InitEvent is an optional event that will be sent to the state
+ // machine as if it was emitted at the onset of the state machine. This
+ // can be used to set up tracking state such as a txid confirmation
+ // event.
+ InitEvent fn.Option[DaemonEvent]
+
+ // MsgMapper is an optional message mapper that can be used to map
+ // normal wire messages into FSM events.
+ MsgMapper fn.Option[MsgMapper[Event]]
+
+ // CustomPollInterval is an optional custom poll interval that can be
+ // used to set a quicker interval for tests.
+ CustomPollInterval fn.Option[time.Duration]
+}
+
+// NewStateMachine creates a new state machine given a set of daemon adapters,
+// an initial state, an environment, and an event to process as if emitted at
+// the onset of the state machine. Such an event can be used to set up tracking
+// state such as a txid confirmation event.
+func NewStateMachine[Event any, Env Environment](cfg StateMachineCfg[Event, Env], //nolint:lll
+) StateMachine[Event, Env] {
+
+ return StateMachine[Event, Env]{
+ cfg: cfg,
+ events: make(chan Event, 1),
+ stateQuery: make(chan stateQuery[Event, Env]),
+ wg: *fn.NewGoroutineManager(context.Background()),
+ newStateEvents: fn.NewEventDistributor[State[Event, Env]](),
+ quit: make(chan struct{}),
+ }
+}
+
+// Start starts the state machine. This will spawn a goroutine that will drive
+// the state machine to completion.
+func (s *StateMachine[Event, Env]) Start() {
+ s.startOnce.Do(func() {
+ _ = s.wg.Go(func(ctx context.Context) {
+ s.driveMachine()
+ })
+ })
+}
+
+// Stop stops the state machine. This will block until the state machine has
+// reached a stopping point.
+func (s *StateMachine[Event, Env]) Stop() {
+ s.stopOnce.Do(func() {
+ close(s.quit)
+ s.wg.Stop()
+ })
+}
+
+// SendEvent sends a new event to the state machine.
+//
+// TODO(roasbeef): bool if processed?
+func (s *StateMachine[Event, Env]) SendEvent(event Event) {
+ log.Debugf("FSM(%v): sending event: %v", s.cfg.Env.Name(),
+ lnutils.SpewLogClosure(event),
+ )
+
+ select {
+ case s.events <- event:
+ case <-s.quit:
+ return
+ }
+}
+
+// CanHandle returns true if the target message can be routed to the state
+// machine.
+func (s *StateMachine[Event, Env]) CanHandle(msg lnwire.Message) bool {
+ cfgMapper := s.cfg.MsgMapper
+ return fn.MapOptionZ(cfgMapper, func(mapper MsgMapper[Event]) bool {
+ return mapper.MapMsg(msg).IsSome()
+ })
+}
+
+// Name returns the name of the state machine's environment.
+func (s *StateMachine[Event, Env]) Name() string {
+ return s.cfg.Env.Name()
+}
+
+// SendMessage attempts to send a wire message to the state machine. If the
+// message can be mapped using the default message mapper, then true is
+// returned indicating that the message was processed. Otherwise, false is
+// returned.
+func (s *StateMachine[Event, Env]) SendMessage(msg lnwire.Message) bool {
+ // If we have no message mapper, then return false as we can't process
+ // this message.
+ if !s.cfg.MsgMapper.IsSome() {
+ return false
+ }
+
+ log.Debugf("FSM(%v): sending msg: %v", s.cfg.Env.Name(),
+ lnutils.SpewLogClosure(msg),
+ )
+
+ // Otherwise, try to map the message using the default message mapper.
+ // If we can't extract an event, then we'll return false to indicate
+ // that the message wasn't processed.
+ var processed bool
+ s.cfg.MsgMapper.WhenSome(func(mapper MsgMapper[Event]) {
+ event := mapper.MapMsg(msg)
+
+ event.WhenSome(func(event Event) {
+ s.SendEvent(event)
+
+ processed = true
+ })
+ })
+
+ return processed
+}
+
+// CurrentState returns the current state of the state machine.
+func (s *StateMachine[Event, Env]) CurrentState() (State[Event, Env], error) {
+ query := stateQuery[Event, Env]{
+ CurrentState: make(chan State[Event, Env], 1),
+ }
+
+ if !fn.SendOrQuit(s.stateQuery, query, s.quit) {
+ return nil, fmt.Errorf("state machine is shutting down")
+ }
+
+ return fn.RecvOrTimeout(query.CurrentState, time.Second)
+}
+
+// StateSubscriber represents an active subscription to be notified of new
+// state transitions.
+type StateSubscriber[E any, F Environment] *fn.EventReceiver[State[E, F]]
+
+// RegisterStateEvents registers a new event listener that will be notified of
+// new state transitions.
+func (s *StateMachine[Event, Env]) RegisterStateEvents() StateSubscriber[
+ Event, Env] {
+
+ subscriber := fn.NewEventReceiver[State[Event, Env]](10)
+
+ // TODO(roasbeef): instead give the state and the input event?
+
+ s.newStateEvents.RegisterSubscriber(subscriber)
+
+ return subscriber
+}
+
+// RemoveStateSub removes the target state subscriber from the set of active
+// subscribers.
+func (s *StateMachine[Event, Env]) RemoveStateSub(sub StateSubscriber[
+ Event, Env]) {
+
+ _ = s.newStateEvents.RemoveSubscriber(sub)
+}
+
+// executeDaemonEvent executes a daemon event, which is a special type of event
+// that can be emitted as part of the state transition function of the state
+// machine. An error is returned if the type of event is unknown.
+func (s *StateMachine[Event, Env]) executeDaemonEvent(
+ event DaemonEvent) error {
+
+ switch daemonEvent := event.(type) {
+ // This is a send message event, so we'll send the event, and also mind
+ // any preconditions as well as post-send events.
+ case *SendMsgEvent[Event]:
+ sendAndCleanUp := func() error {
+ log.Debugf("FSM(%v): sending message to target(%x): "+
+ "%v", s.cfg.Env.Name(),
+ daemonEvent.TargetPeer.SerializeCompressed(),
+ lnutils.SpewLogClosure(daemonEvent.Msgs),
+ )
+
+ err := s.cfg.Daemon.SendMessages(
+ daemonEvent.TargetPeer, daemonEvent.Msgs,
+ )
+ if err != nil {
+ return fmt.Errorf("unable to send msgs: %w",
+ err)
+ }
+
+ // If a post-send event was specified, then we'll funnel
+ // that back into the main state machine now as well.
+ return fn.MapOptionZ(daemonEvent.PostSendEvent, func(event Event) error { //nolint:lll
+ return s.wg.Go(func(ctx context.Context) {
+ log.Debugf("FSM(%v): sending "+
+ "post-send event: %v",
+ s.cfg.Env.Name(),
+ lnutils.SpewLogClosure(event),
+ )
+
+ s.SendEvent(event)
+ })
+ })
+ }
+
+ // If this doesn't have a SendWhen predicate, then we can just
+ // send it off right away.
+ if !daemonEvent.SendWhen.IsSome() {
+ return sendAndCleanUp()
+ }
+
+ // Otherwise, this has a SendWhen predicate, so we'll need
+ // launch a goroutine to poll the SendWhen, then send only once
+ // the predicate is true.
+ return s.wg.Go(func(ctx context.Context) {
+ predicateTicker := time.NewTicker(
+ s.cfg.CustomPollInterval.UnwrapOr(pollInterval),
+ )
+ defer predicateTicker.Stop()
+
+ log.Infof("FSM(%v): waiting for send predicate to "+
+ "be true", s.cfg.Env.Name())
+
+ for {
+ select {
+ case <-predicateTicker.C:
+ canSend := fn.MapOptionZ(
+ daemonEvent.SendWhen,
+ func(pred SendPredicate) bool {
+ return pred()
+ },
+ )
+
+ if canSend {
+ log.Infof("FSM(%v): send "+
+ "active predicate",
+ s.cfg.Env.Name())
+
+ err := sendAndCleanUp()
+ if err != nil {
+ //nolint:lll
+ log.Errorf("FSM(%v): unable to send message: %v", err)
+ }
+
+ return
+ }
+
+ case <-ctx.Done():
+ return
+ }
+ }
+ })
+
+ // If this is a broadcast transaction event, then we'll broadcast with
+ // the label attached.
+ case *BroadcastTxn:
+ log.Debugf("FSM(%v): broadcasting txn, txid=%v",
+ s.cfg.Env.Name(), daemonEvent.Tx.TxHash())
+
+ err := s.cfg.Daemon.BroadcastTransaction(
+ daemonEvent.Tx, daemonEvent.Label,
+ )
+ if err != nil {
+ return fmt.Errorf("unable to broadcast txn: %w", err)
+ }
+
+ return nil
+
+ // The state machine has requested a new event to be sent once a
+ // transaction spending a specified outpoint has confirmed.
+ case *RegisterSpend[Event]:
+ log.Debugf("FSM(%v): registering spend: %v", s.cfg.Env.Name(),
+ daemonEvent.OutPoint)
+
+ spendEvent, err := s.cfg.Daemon.RegisterSpendNtfn(
+ &daemonEvent.OutPoint, daemonEvent.PkScript,
+ daemonEvent.HeightHint,
+ )
+ if err != nil {
+ return fmt.Errorf("unable to register spend: %w", err)
+ }
+
+ return s.wg.Go(func(ctx context.Context) {
+ for {
+ select {
+ case spend, ok := <-spendEvent.Spend:
+ if !ok {
+ return
+ }
+
+ // If there's a post-send event, then
+ // we'll send that into the current
+ // state now.
+ postSpend := daemonEvent.PostSpendEvent
+ postSpend.WhenSome(func(f SpendMapper[Event]) { //nolint:lll
+ customEvent := f(spend)
+ s.SendEvent(customEvent)
+ })
+
+ return
+
+ case <-ctx.Done():
+ return
+ }
+ }
+ })
+
+ // The state machine has requested a new event to be sent once a
+ // specified txid+pkScript pair has confirmed.
+ case *RegisterConf[Event]:
+ log.Debugf("FSM(%v): registering conf: %v", s.cfg.Env.Name(),
+ daemonEvent.Txid)
+
+ numConfs := daemonEvent.NumConfs.UnwrapOr(1)
+ confEvent, err := s.cfg.Daemon.RegisterConfirmationsNtfn(
+ &daemonEvent.Txid, daemonEvent.PkScript,
+ numConfs, daemonEvent.HeightHint,
+ )
+ if err != nil {
+ return fmt.Errorf("unable to register conf: %w", err)
+ }
+
+ return s.wg.Go(func(ctx context.Context) {
+ for {
+ select {
+ case <-confEvent.Confirmed:
+ // If there's a post-conf event, then
+ // we'll send that into the current
+ // state now.
+ //
+ // TODO(roasbeef): refactor to
+ // dispatchAfterRecv w/ above
+ postConf := daemonEvent.PostConfEvent
+ postConf.WhenSome(func(e Event) {
+ s.SendEvent(e)
+ })
+
+ return
+
+ case <-ctx.Done():
+ return
+ }
+ }
+ })
+ }
+
+ return fmt.Errorf("unknown daemon event: %T", event)
+}
+
+// applyEvents applies a new event to the state machine. This will continue
+// until no further events are emitted by the state machine. Along the way,
+// we'll also ensure to execute any daemon events that are emitted.
+func (s *StateMachine[Event, Env]) applyEvents(currentState State[Event, Env],
+ newEvent Event) (State[Event, Env], error) {
+
+ log.Debugf("FSM(%v): applying new event", s.cfg.Env.Name(),
+ lnutils.SpewLogClosure(newEvent),
+ )
+ eventQueue := fn.NewQueue(newEvent)
+
+ // Given the next event to handle, we'll process the event, then add
+ // any new emitted internal events to our event queue. This continues
+ // until we reach a terminal state, or we run out of internal events to
+ // process.
+ //
+ //nolint:lll
+ for nextEvent := eventQueue.Dequeue(); nextEvent.IsSome(); nextEvent = eventQueue.Dequeue() {
+ err := fn.MapOptionZ(nextEvent, func(event Event) error {
+ log.Debugf("FSM(%v): processing event: %v",
+ s.cfg.Env.Name(),
+ lnutils.SpewLogClosure(event),
+ )
+
+ // Apply the state transition function of the current
+ // state given this new event and our existing env.
+ transition, err := currentState.ProcessEvent(
+ event, s.cfg.Env,
+ )
+ if err != nil {
+ return err
+ }
+
+ newEvents := transition.NewEvents
+ err = fn.MapOptionZ(newEvents, func(events EmittedEvent[Event]) error { //nolint:lll
+ // With the event processed, we'll process any
+ // new daemon events that were emitted as part
+ // of this new state transition.
+ //
+ //nolint:lll
+ err := fn.MapOptionZ(events.ExternalEvents, func(dEvents DaemonEventSet) error {
+ log.Debugf("FSM(%v): processing "+
+ "daemon %v daemon events",
+ s.cfg.Env.Name(), len(dEvents))
+
+ for _, dEvent := range dEvents {
+ err := s.executeDaemonEvent(
+ dEvent,
+ )
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ // Next, we'll add any new emitted events to
+ // our event queue.
+ //
+ //nolint:lll
+ events.InternalEvent.WhenSome(func(es []Event) {
+ for _, inEvent := range es {
+ log.Debugf("FSM(%v): adding "+
+ "new internal event "+
+ "to queue: %v",
+ s.cfg.Env.Name(),
+ lnutils.SpewLogClosure(
+ inEvent,
+ ),
+ )
+
+ eventQueue.Enqueue(inEvent)
+ }
+ })
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ log.Infof("FSM(%v): state transition: from_state=%T, "+
+ "to_state=%T",
+ s.cfg.Env.Name(), currentState,
+ transition.NextState)
+
+ // With our events processed, we'll now update our
+ // internal state.
+ currentState = transition.NextState
+
+ // Notify our subscribers of the new state transition.
+ //
+ // TODO(roasbeef): will only give us the outer state?
+ // * let FSMs choose which state to emit?
+ s.newStateEvents.NotifySubscribers(currentState)
+
+ return nil
+ })
+ if err != nil {
+ return currentState, err
+ }
+ }
+
+ return currentState, nil
+}
+
+// driveMachine is the main event loop of the state machine. It accepts any new
+// incoming events, and then drives the state machine forward until it reaches
+// a terminal state.
+func (s *StateMachine[Event, Env]) driveMachine() {
+ log.Debugf("FSM(%v): starting state machine", s.cfg.Env.Name())
+
+ currentState := s.cfg.InitialState
+
+ // Before we start, if we have an init daemon event specified, then
+ // we'll handle that now.
+ err := fn.MapOptionZ(s.cfg.InitEvent, func(event DaemonEvent) error {
+ return s.executeDaemonEvent(event)
+ })
+ if err != nil {
+ log.Errorf("unable to execute init event: %w", err)
+ return
+ }
+
+ // We just started driving the state machine, so we'll notify our
+ // subscribers of this starting state.
+ s.newStateEvents.NotifySubscribers(currentState)
+
+ for {
+ select {
+ // We have a new external event, so we'll drive the state
+ // machine forward until we either run out of internal events,
+ // or we reach a terminal state.
+ case newEvent := <-s.events:
+ newState, err := s.applyEvents(currentState, newEvent)
+ if err != nil {
+ s.cfg.ErrorReporter.ReportError(err)
+
+ log.Errorf("unable to apply event: %v", err)
+
+ // An error occurred, so we'll tear down the
+ // entire state machine as we can't proceed.
+ go s.Stop()
+
+ return
+ }
+
+ currentState = newState
+
+ // An outside caller is querying our state, so we'll return the
+ // latest state.
+ case stateQuery := <-s.stateQuery:
+ if !fn.SendOrQuit(stateQuery.CurrentState, currentState, s.quit) { //nolint:lll
+ return
+ }
+
+ case <-s.wg.Done():
+ return
+ }
+ }
+}
diff --git a/protofsm/state_machine_test.go b/protofsm/state_machine_test.go
new file mode 100644
index 0000000000..0432f386b7
--- /dev/null
+++ b/protofsm/state_machine_test.go
@@ -0,0 +1,456 @@
+package protofsm
+
+import (
+ "encoding/hex"
+ "fmt"
+ "sync/atomic"
+ "testing"
+
+ "github.com/btcsuite/btcd/btcec/v2"
+ "github.com/btcsuite/btcd/chaincfg/chainhash"
+ "github.com/btcsuite/btcd/wire"
+ "github.com/lightningnetwork/lnd/chainntnfs"
+ "github.com/lightningnetwork/lnd/fn"
+ "github.com/lightningnetwork/lnd/lnwire"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+)
+
+type dummyEvents interface {
+ dummy()
+}
+
+type goToFin struct {
+}
+
+func (g *goToFin) dummy() {
+}
+
+type emitInternal struct {
+}
+
+func (e *emitInternal) dummy() {
+}
+
+type daemonEvents struct {
+}
+
+func (s *daemonEvents) dummy() {
+}
+
+type dummyEnv struct {
+ mock.Mock
+}
+
+func (d *dummyEnv) Name() string {
+ return "test"
+}
+
+type dummyStateStart struct {
+ canSend *atomic.Bool
+}
+
+var (
+ hexDecode = func(keyStr string) []byte {
+ keyBytes, _ := hex.DecodeString(keyStr)
+ return keyBytes
+ }
+ pub1, _ = btcec.ParsePubKey(hexDecode(
+ "02ec95e4e8ad994861b95fc5986eedaac24739e5ea3d0634db4c8ccd44cd" +
+ "a126ea",
+ ))
+ pub2, _ = btcec.ParsePubKey(hexDecode(
+ "0356167ba3e54ac542e86e906d4186aba9ca0b9df45001c62b753d33fe06" +
+ "f5b4e8",
+ ))
+)
+
+func (d *dummyStateStart) ProcessEvent(event dummyEvents, env *dummyEnv,
+) (*StateTransition[dummyEvents, *dummyEnv], error) {
+
+ switch event.(type) {
+ case *goToFin:
+ return &StateTransition[dummyEvents, *dummyEnv]{
+ NextState: &dummyStateFin{},
+ }, nil
+
+ // This state will loop back upon itself, but will also emit an event
+ // to head to the terminal state.
+ case *emitInternal:
+ return &StateTransition[dummyEvents, *dummyEnv]{
+ NextState: &dummyStateStart{},
+ NewEvents: fn.Some(EmittedEvent[dummyEvents]{
+ InternalEvent: fn.Some(
+ []dummyEvents{&goToFin{}},
+ ),
+ }),
+ }, nil
+
+ // This state will proceed to the terminal state, but will emit all the
+ // possible daemon events.
+ case *daemonEvents:
+ // This send event can only succeed once the bool turns to
+ // true. After that, then we'll expect another event to take us
+ // to the final state.
+ sendEvent := &SendMsgEvent[dummyEvents]{
+ TargetPeer: *pub1,
+ SendWhen: fn.Some(func() bool {
+ return d.canSend.Load()
+ }),
+ PostSendEvent: fn.Some(dummyEvents(&goToFin{})),
+ }
+
+ // We'll also send out a normal send event that doesn't have
+ // any preconditions.
+ sendEvent2 := &SendMsgEvent[dummyEvents]{
+ TargetPeer: *pub2,
+ }
+
+ return &StateTransition[dummyEvents, *dummyEnv]{
+ // We'll state in this state until the send succeeds
+ // based on our predicate. Then it'll transition to the
+ // final state.
+ NextState: &dummyStateStart{
+ canSend: d.canSend,
+ },
+ NewEvents: fn.Some(EmittedEvent[dummyEvents]{
+ ExternalEvents: fn.Some(DaemonEventSet{
+ sendEvent, sendEvent2,
+ &BroadcastTxn{
+ Tx: &wire.MsgTx{},
+ Label: "test",
+ },
+ }),
+ }),
+ }, nil
+ }
+
+ return nil, fmt.Errorf("unknown event: %T", event)
+}
+
+func (d *dummyStateStart) IsTerminal() bool {
+ return false
+}
+
+type dummyStateFin struct {
+}
+
+func (d *dummyStateFin) ProcessEvent(event dummyEvents, env *dummyEnv,
+) (*StateTransition[dummyEvents, *dummyEnv], error) {
+
+ return &StateTransition[dummyEvents, *dummyEnv]{
+ NextState: &dummyStateFin{},
+ }, nil
+}
+
+func (d *dummyStateFin) IsTerminal() bool {
+ return true
+}
+
+func assertState[Event any, Env Environment](t *testing.T,
+ m *StateMachine[Event, Env], expectedState State[Event, Env]) {
+
+ state, err := m.CurrentState()
+ require.NoError(t, err)
+ require.IsType(t, expectedState, state)
+}
+
+func assertStateTransitions[Event any, Env Environment](
+ t *testing.T, stateSub StateSubscriber[Event, Env],
+ expectedStates []State[Event, Env]) {
+
+ for _, expectedState := range expectedStates {
+ newState := <-stateSub.NewItemCreated.ChanOut()
+
+ require.IsType(t, expectedState, newState)
+ }
+}
+
+type dummyAdapters struct {
+ mock.Mock
+
+ confChan chan *chainntnfs.TxConfirmation
+ spendChan chan *chainntnfs.SpendDetail
+}
+
+func newDaemonAdapters() *dummyAdapters {
+ return &dummyAdapters{
+ confChan: make(chan *chainntnfs.TxConfirmation, 1),
+ spendChan: make(chan *chainntnfs.SpendDetail, 1),
+ }
+}
+
+func (d *dummyAdapters) SendMessages(pub btcec.PublicKey,
+ msgs []lnwire.Message) error {
+
+ args := d.Called(pub, msgs)
+
+ return args.Error(0)
+}
+
+func (d *dummyAdapters) BroadcastTransaction(tx *wire.MsgTx,
+ label string) error {
+
+ args := d.Called(tx, label)
+
+ return args.Error(0)
+}
+
+func (d *dummyAdapters) RegisterConfirmationsNtfn(txid *chainhash.Hash,
+ pkScript []byte, numConfs, heightHint uint32,
+ opts ...chainntnfs.NotifierOption,
+) (*chainntnfs.ConfirmationEvent, error) {
+
+ args := d.Called(txid, pkScript, numConfs)
+
+ err := args.Error(0)
+
+ return &chainntnfs.ConfirmationEvent{
+ Confirmed: d.confChan,
+ }, err
+}
+
+func (d *dummyAdapters) RegisterSpendNtfn(outpoint *wire.OutPoint,
+ pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) {
+
+ args := d.Called(outpoint, pkScript, heightHint)
+
+ err := args.Error(0)
+
+ return &chainntnfs.SpendEvent{
+ Spend: d.spendChan,
+ }, err
+}
+
+// TestStateMachineOnInitDaemonEvent tests that the state machine will properly
+// execute any init-level daemon events passed into it.
+func TestStateMachineOnInitDaemonEvent(t *testing.T) {
+ // First, we'll create our state machine given the env, and our
+ // starting state.
+ env := &dummyEnv{}
+ startingState := &dummyStateStart{}
+
+ adapters := newDaemonAdapters()
+
+ // We'll make an init event that'll send to a peer, then transition us
+ // to our terminal state.
+ initEvent := &SendMsgEvent[dummyEvents]{
+ TargetPeer: *pub1,
+ PostSendEvent: fn.Some(dummyEvents(&goToFin{})),
+ }
+
+ cfg := StateMachineCfg[dummyEvents, *dummyEnv]{
+ Daemon: adapters,
+ InitialState: startingState,
+ Env: env,
+ InitEvent: fn.Some[DaemonEvent](initEvent),
+ }
+ stateMachine := NewStateMachine(cfg)
+
+ // Before we start up the state machine, we'll assert that the send
+ // message adapter is called on start up.
+ adapters.On("SendMessages", *pub1, mock.Anything).Return(nil)
+
+ stateMachine.Start()
+ defer stateMachine.Stop()
+
+ // As we're triggering internal events, we'll also subscribe to the set
+ // of new states so we can assert as we go.
+ stateSub := stateMachine.RegisterStateEvents()
+ defer stateMachine.RemoveStateSub(stateSub)
+
+ // Assert that we go from the starting state to the final state. The
+ // state machine should now also be on the final terminal state.
+ expectedStates := []State[dummyEvents, *dummyEnv]{
+ &dummyStateStart{}, &dummyStateFin{},
+ }
+ assertStateTransitions(t, stateSub, expectedStates)
+
+ // We'll now assert that after the daemon was started, the send message
+ // adapter was called above as specified in the init event.
+ adapters.AssertExpectations(t)
+ env.AssertExpectations(t)
+}
+
+// TestStateMachineInternalEvents tests that the state machine is able to add
+// new internal events to the event queue for further processing during a state
+// transition.
+func TestStateMachineInternalEvents(t *testing.T) {
+ t.Parallel()
+
+ // First, we'll create our state machine given the env, and our
+ // starting state.
+ env := &dummyEnv{}
+ startingState := &dummyStateStart{}
+
+ adapters := newDaemonAdapters()
+
+ cfg := StateMachineCfg[dummyEvents, *dummyEnv]{
+ Daemon: adapters,
+ InitialState: startingState,
+ Env: env,
+ InitEvent: fn.None[DaemonEvent](),
+ }
+ stateMachine := NewStateMachine(cfg)
+ stateMachine.Start()
+ defer stateMachine.Stop()
+
+ // As we're triggering internal events, we'll also subscribe to the set
+ // of new states so we can assert as we go.
+ stateSub := stateMachine.RegisterStateEvents()
+ defer stateMachine.RemoveStateSub(stateSub)
+
+ // For this transition, we'll send in the emitInternal event, which'll
+ // send us back to the starting event, but emit an internal event.
+ stateMachine.SendEvent(&emitInternal{})
+
+ // We'll now also assert the path we took to get here to ensure the
+ // internal events were processed.
+ expectedStates := []State[dummyEvents, *dummyEnv]{
+ &dummyStateStart{}, &dummyStateStart{}, &dummyStateFin{},
+ }
+ assertStateTransitions(
+ t, stateSub, expectedStates,
+ )
+
+ // We should ultimately end up in the terminal state.
+ assertState[dummyEvents, *dummyEnv](t, &stateMachine, &dummyStateFin{})
+
+ // Make sure all the env expectations were met.
+ env.AssertExpectations(t)
+}
+
+// TestStateMachineDaemonEvents tests that the state machine is able to process
+// daemon emitted as part of the state transition process.
+func TestStateMachineDaemonEvents(t *testing.T) {
+ t.Parallel()
+
+ // First, we'll create our state machine given the env, and our
+ // starting state.
+ env := &dummyEnv{}
+
+ var boolTrigger atomic.Bool
+ startingState := &dummyStateStart{
+ canSend: &boolTrigger,
+ }
+
+ adapters := newDaemonAdapters()
+
+ cfg := StateMachineCfg[dummyEvents, *dummyEnv]{
+ Daemon: adapters,
+ InitialState: startingState,
+ Env: env,
+ InitEvent: fn.None[DaemonEvent](),
+ }
+ stateMachine := NewStateMachine(cfg)
+ stateMachine.Start()
+ defer stateMachine.Stop()
+
+ // As we're triggering internal events, we'll also subscribe to the set
+ // of new states so we can assert as we go.
+ stateSub := stateMachine.RegisterStateEvents()
+ defer stateMachine.RemoveStateSub(stateSub)
+
+ // As soon as we send in the daemon event, we expect the
+ // disable+broadcast events to be processed, as they are unconditional.
+ adapters.On(
+ "BroadcastTransaction", mock.Anything, mock.Anything,
+ ).Return(nil)
+ adapters.On("SendMessages", *pub2, mock.Anything).Return(nil)
+
+ // We'll start off by sending in the daemon event, which'll trigger the
+ // state machine to execute the series of daemon events.
+ stateMachine.SendEvent(&daemonEvents{})
+
+ // We should transition back to the starting state now, after we
+ // started from the very same state.
+ expectedStates := []State[dummyEvents, *dummyEnv]{
+ &dummyStateStart{}, &dummyStateStart{},
+ }
+ assertStateTransitions(t, stateSub, expectedStates)
+
+ // At this point, we expect that the two methods above were called.
+ adapters.AssertExpectations(t)
+
+ // However, we don't expect the SendMessages for the first peer target
+ // to be called yet, as the condition hasn't yet been met.
+ adapters.AssertNotCalled(t, "SendMessages", *pub1)
+
+ // We'll now flip the bool to true, which should cause the SendMessages
+ // method to be called, and for us to transition to the final state.
+ boolTrigger.Store(true)
+ adapters.On("SendMessages", *pub1, mock.Anything).Return(nil)
+
+ expectedStates = []State[dummyEvents, *dummyEnv]{&dummyStateFin{}}
+ assertStateTransitions(t, stateSub, expectedStates)
+
+ adapters.AssertExpectations(t)
+ env.AssertExpectations(t)
+}
+
+type dummyMsgMapper struct {
+ mock.Mock
+}
+
+func (d *dummyMsgMapper) MapMsg(wireMsg lnwire.Message) fn.Option[dummyEvents] {
+ args := d.Called(wireMsg)
+
+ //nolint:forcetypeassert
+ return args.Get(0).(fn.Option[dummyEvents])
+}
+
+// TestStateMachineMsgMapper tests that given a message mapper, we can properly
+// send in wire messages get mapped to FSM events.
+func TestStateMachineMsgMapper(t *testing.T) {
+ // First, we'll create our state machine given the env, and our
+ // starting state.
+ env := &dummyEnv{}
+ startingState := &dummyStateStart{}
+ adapters := newDaemonAdapters()
+
+ // We'll also provide a message mapper that only knows how to map a
+ // single wire message (error).
+ dummyMapper := &dummyMsgMapper{}
+
+ // The only thing we know how to map is the error message, which'll
+ // terminate the state machine.
+ wireError := &lnwire.Error{}
+ initMsg := &lnwire.Init{}
+ dummyMapper.On("MapMsg", wireError).Return(
+ fn.Some(dummyEvents(&goToFin{})),
+ )
+ dummyMapper.On("MapMsg", initMsg).Return(fn.None[dummyEvents]())
+
+ cfg := StateMachineCfg[dummyEvents, *dummyEnv]{
+ Daemon: adapters,
+ InitialState: startingState,
+ Env: env,
+ MsgMapper: fn.Some[MsgMapper[dummyEvents]](dummyMapper),
+ }
+ stateMachine := NewStateMachine(cfg)
+ stateMachine.Start()
+ defer stateMachine.Stop()
+
+ // As we're triggering internal events, we'll also subscribe to the set
+ // of new states so we can assert as we go.
+ stateSub := stateMachine.RegisterStateEvents()
+ defer stateMachine.RemoveStateSub(stateSub)
+
+ // First, we'll verify that the CanHandle method works as expected.
+ require.True(t, stateMachine.CanHandle(wireError))
+ require.False(t, stateMachine.CanHandle(&lnwire.Init{}))
+
+ // Next, we'll attempt to send the wire message into the state machine.
+ // We should transition to the final state.
+ require.True(t, stateMachine.SendMessage(wireError))
+
+ // We should transition to the final state.
+ expectedStates := []State[dummyEvents, *dummyEnv]{
+ &dummyStateStart{}, &dummyStateFin{},
+ }
+ assertStateTransitions(t, stateSub, expectedStates)
+
+ dummyMapper.AssertExpectations(t)
+ adapters.AssertExpectations(t)
+ env.AssertExpectations(t)
+}
diff --git a/routing/payment_lifecycle.go b/routing/payment_lifecycle.go
index 93b214bdea..4ae94c5814 100644
--- a/routing/payment_lifecycle.go
+++ b/routing/payment_lifecycle.go
@@ -180,6 +180,9 @@ func (p *paymentLifecycle) resumePayment(ctx context.Context) ([32]byte,
return [32]byte{}, nil, err
}
+ // Get the payment state.
+ ps := payment.GetState()
+
for _, a := range payment.InFlightHTLCs() {
a := a
@@ -192,7 +195,7 @@ func (p *paymentLifecycle) resumePayment(ctx context.Context) ([32]byte,
// exitWithErr is a helper closure that logs and returns an error.
exitWithErr := func(err error) ([32]byte, *route.Route, error) {
log.Errorf("Payment %v with status=%v failed: %v",
- p.identifier, payment.GetStatus(), err)
+ p.identifier, ps, err)
return [32]byte{}, nil, err
}
@@ -210,7 +213,7 @@ lifecycle:
return exitWithErr(err)
}
- ps := payment.GetState()
+ ps = payment.GetState()
remainingFees := p.calcFeeBudget(ps.FeesPaid)
log.Debugf("Payment %v: status=%v, active_shards=%v, "+
@@ -337,7 +340,7 @@ func (p *paymentLifecycle) checkContext(ctx context.Context) error {
if errors.Is(ctx.Err(), context.DeadlineExceeded) {
reason = channeldb.FailureReasonTimeout
log.Warnf("Payment attempt not completed before "+
- "timeout, id=%s", p.identifier.String())
+ "context timeout, id=%s", p.identifier.String())
} else {
reason = channeldb.FailureReasonCanceled
log.Warnf("Payment attempt context canceled, id=%s",
diff --git a/scripts/itest_parallel.sh b/scripts/itest_parallel.sh
index 2ae7f3f531..ab1b3efa28 100755
--- a/scripts/itest_parallel.sh
+++ b/scripts/itest_parallel.sh
@@ -3,9 +3,10 @@
# Get all the variables.
PROCESSES=$1
TRANCHES=$2
+SHUFFLE_SEED=$3
-# Here we also shift 2 times and get the rest of our flags to pass on in $@.
-shift 2
+# Here we also shift 3 times and get the rest of our flags to pass on in $@.
+shift 3
# Create a variable to hold the final exit code.
exit_code=0
@@ -13,7 +14,7 @@ exit_code=0
# Run commands using xargs in parallel and capture their PIDs
pids=()
for ((i=0; i sweeper -> txPublisher.
+ consumers := []chainio.Consumer{
+ s.chainArb,
+ s.sweeper,
+ s.txPublisher,
+ }
+ s.blockbeatDispatcher.RegisterQueue(consumers)
+}
+
// signAliasUpdate takes a ChannelUpdate and returns the signature. This is
// used for option_scid_alias channels where the ChannelUpdate to be sent back
// may differ from what is on disk.
@@ -2033,12 +2082,41 @@ func (c cleaner) run() {
}
}
+// startLowLevelServices starts the low-level services of the server. These
+// services must be started successfully before running the main server. The
+// services are,
+// 1. the chain notifier.
+//
+// TODO(yy): identify and add more low-level services here.
+func (s *server) startLowLevelServices() error {
+ var startErr error
+
+ cleanup := cleaner{}
+
+ cleanup = cleanup.add(s.cc.ChainNotifier.Stop)
+ if err := s.cc.ChainNotifier.Start(); err != nil {
+ startErr = err
+ }
+
+ if startErr != nil {
+ cleanup.run()
+ }
+
+ return startErr
+}
+
// Start starts the main daemon server, all requested listeners, and any helper
// goroutines.
// NOTE: This function is safe for concurrent access.
//
//nolint:funlen
func (s *server) Start() error {
+ // Get the current blockbeat.
+ beat, err := s.getStartingBeat()
+ if err != nil {
+ return err
+ }
+
var startErr error
// If one sub system fails to start, the following code ensures that the
@@ -2092,12 +2170,6 @@ func (s *server) Start() error {
return
}
- cleanup = cleanup.add(s.cc.ChainNotifier.Stop)
- if err := s.cc.ChainNotifier.Start(); err != nil {
- startErr = err
- return
- }
-
cleanup = cleanup.add(s.cc.BestBlockTracker.Stop)
if err := s.cc.BestBlockTracker.Start(); err != nil {
startErr = err
@@ -2133,13 +2205,13 @@ func (s *server) Start() error {
}
cleanup = cleanup.add(s.txPublisher.Stop)
- if err := s.txPublisher.Start(); err != nil {
+ if err := s.txPublisher.Start(beat); err != nil {
startErr = err
return
}
cleanup = cleanup.add(s.sweeper.Stop)
- if err := s.sweeper.Start(); err != nil {
+ if err := s.sweeper.Start(beat); err != nil {
startErr = err
return
}
@@ -2184,7 +2256,7 @@ func (s *server) Start() error {
}
cleanup = cleanup.add(s.chainArb.Stop)
- if err := s.chainArb.Start(); err != nil {
+ if err := s.chainArb.Start(beat); err != nil {
startErr = err
return
}
@@ -2425,6 +2497,17 @@ func (s *server) Start() error {
srvrLog.Infof("Auto peer bootstrapping is disabled")
}
+ // Start the blockbeat after all other subsystems have been
+ // started so they are ready to receive new blocks.
+ cleanup = cleanup.add(func() error {
+ s.blockbeatDispatcher.Stop()
+ return nil
+ })
+ if err := s.blockbeatDispatcher.Start(); err != nil {
+ startErr = err
+ return
+ }
+
// Set the active flag now that we've completed the full
// startup.
atomic.StoreInt32(&s.active, 1)
@@ -2449,6 +2532,9 @@ func (s *server) Stop() error {
// Shutdown connMgr first to prevent conns during shutdown.
s.connMgr.Stop()
+ // Stop dispatching blocks to other systems immediately.
+ s.blockbeatDispatcher.Stop()
+
// Shutdown the wallet, funding manager, and the rpc server.
if err := s.chanStatusMgr.Stop(); err != nil {
srvrLog.Warnf("failed to stop chanStatusMgr: %v", err)
@@ -4215,9 +4301,14 @@ func (s *server) addPeer(p *peer.Brontide) {
return
}
+ pubBytes := p.IdentityKey().SerializeCompressed()
+
// Ignore new peers if we're shutting down.
if s.Stopped() {
+ srvrLog.Infof("Server stopped, skipped adding peer=%x",
+ pubBytes)
p.Disconnect(ErrServerShuttingDown)
+
return
}
@@ -4226,8 +4317,9 @@ func (s *server) addPeer(p *peer.Brontide) {
// TODO(roasbeef): pipe all requests through to the
// queryHandler/peerManager
- pubSer := p.IdentityKey().SerializeCompressed()
- pubStr := string(pubSer)
+ // NOTE: This pubStr is a raw bytes to string conversion and will NOT
+ // be human-readable.
+ pubStr := string(pubBytes)
s.peersByPub[pubStr] = p
@@ -4240,7 +4332,7 @@ func (s *server) addPeer(p *peer.Brontide) {
// Inform the peer notifier of a peer online event so that it can be reported
// to clients listening for peer events.
var pubKey [33]byte
- copy(pubKey[:], pubSer)
+ copy(pubKey[:], pubBytes)
s.peerNotifier.NotifyPeerOnline(pubKey)
}
@@ -4257,8 +4349,12 @@ func (s *server) addPeer(p *peer.Brontide) {
func (s *server) peerInitializer(p *peer.Brontide) {
defer s.wg.Done()
+ pubBytes := p.IdentityKey().SerializeCompressed()
+
// Avoid initializing peers while the server is exiting.
if s.Stopped() {
+ srvrLog.Infof("Server stopped, skipped initializing peer=%x",
+ pubBytes)
return
}
@@ -4276,8 +4372,6 @@ func (s *server) peerInitializer(p *peer.Brontide) {
s.wg.Add(1)
go s.peerTerminationWatcher(p, ready)
- pubBytes := p.IdentityKey().SerializeCompressed()
-
// Start the peer! If an error occurs, we Disconnect the peer, which
// will unblock the peerTerminationWatcher.
if err := p.Start(); err != nil {
@@ -5096,3 +5190,35 @@ func (s *server) fetchClosedChannelSCIDs() map[lnwire.ShortChannelID]struct{} {
return closedSCIDs
}
+
+// getStartingBeat returns the current beat. This is used during the startup to
+// initialize blockbeat consumers.
+func (s *server) getStartingBeat() (*chainio.Beat, error) {
+ // beat is the current blockbeat.
+ var beat *chainio.Beat
+
+ // We should get a notification with the current best block immediately
+ // by passing a nil block.
+ blockEpochs, err := s.cc.ChainNotifier.RegisterBlockEpochNtfn(nil)
+ if err != nil {
+ return beat, fmt.Errorf("register block epoch ntfn: %w", err)
+ }
+ defer blockEpochs.Cancel()
+
+ // We registered for the block epochs with a nil request. The notifier
+ // should send us the current best block immediately. So we need to
+ // wait for it here because we need to know the current best height.
+ select {
+ case bestBlock := <-blockEpochs.Epochs:
+ srvrLog.Infof("Received initial block %v at height %d",
+ bestBlock.Hash, bestBlock.Height)
+
+ // Update the current blockbeat.
+ beat = chainio.NewBeat(*bestBlock)
+
+ case <-s.quit:
+ srvrLog.Debug("LND shutting down")
+ }
+
+ return beat, nil
+}
diff --git a/sweep/fee_bumper.go b/sweep/fee_bumper.go
index 5ea4d8e4bd..6db8585e63 100644
--- a/sweep/fee_bumper.go
+++ b/sweep/fee_bumper.go
@@ -12,6 +12,7 @@ import (
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcwallet/chain"
+ "github.com/lightningnetwork/lnd/chainio"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/fn"
"github.com/lightningnetwork/lnd/input"
@@ -65,7 +66,7 @@ type Bumper interface {
// and monitors its confirmation status for potential fee bumping. It
// returns a chan that the caller can use to receive updates about the
// broadcast result and potential RBF attempts.
- Broadcast(req *BumpRequest) (<-chan *BumpResult, error)
+ Broadcast(req *BumpRequest) <-chan *BumpResult
}
// BumpEvent represents the event of a fee bumping attempt.
@@ -75,7 +76,17 @@ const (
// TxPublished is sent when the broadcast attempt is finished.
TxPublished BumpEvent = iota
- // TxFailed is sent when the broadcast attempt fails.
+ // TxFailed is sent when the tx has encountered a fee-related error
+ // during its creation or broadcast, or an internal error from the fee
+ // bumper. In either case the inputs in this tx should be retried with
+ // either a different grouping strategy or an increased budget.
+ //
+ // NOTE: We also send this event when there's a third party spend
+ // event, and the sweeper will handle cleaning this up once it's
+ // confirmed.
+ //
+ // TODO(yy): Remove the above usage once we remove sweeping non-CPFP
+ // anchors.
TxFailed
// TxReplaced is sent when the original tx is replaced by a new one.
@@ -84,6 +95,11 @@ const (
// TxConfirmed is sent when the tx is confirmed.
TxConfirmed
+ // TxFatal is sent when the inputs in this tx cannot be retried. Txns
+ // will end up in this state if they have encountered a non-fee related
+ // error, which means they cannot be retried with increased budget.
+ TxFatal
+
// sentinalEvent is used to check if an event is unknown.
sentinalEvent
)
@@ -99,6 +115,8 @@ func (e BumpEvent) String() string {
return "Replaced"
case TxConfirmed:
return "Confirmed"
+ case TxFatal:
+ return "Fatal"
default:
return "Unknown"
}
@@ -136,6 +154,10 @@ type BumpRequest struct {
// ExtraTxOut tracks if this bump request has an optional set of extra
// outputs to add to the transaction.
ExtraTxOut fn.Option[SweepOutput]
+
+ // Immediate is used to specify that the tx should be broadcast
+ // immediately.
+ Immediate bool
}
// MaxFeeRateAllowed returns the maximum fee rate allowed for the given
@@ -246,10 +268,22 @@ type BumpResult struct {
requestID uint64
}
+// String returns a human-readable string for the result.
+func (b *BumpResult) String() string {
+ desc := fmt.Sprintf("Event=%v", b.Event)
+ if b.Tx != nil {
+ desc += fmt.Sprintf(", Tx=%v", b.Tx.TxHash())
+ }
+
+ return fmt.Sprintf("[%s]", desc)
+}
+
// Validate validates the BumpResult so it's safe to use.
func (b *BumpResult) Validate() error {
- // Every result must have a tx.
- if b.Tx == nil {
+ isFailureEvent := b.Event == TxFailed || b.Event == TxFatal
+
+ // Every result must have a tx except the fatal or failed case.
+ if b.Tx == nil && !isFailureEvent {
return fmt.Errorf("%w: nil tx", ErrInvalidBumpResult)
}
@@ -263,8 +297,8 @@ func (b *BumpResult) Validate() error {
return fmt.Errorf("%w: nil replacing tx", ErrInvalidBumpResult)
}
- // If it's a failed event, it must have an error.
- if b.Event == TxFailed && b.Err == nil {
+ // If it's a failed or fatal event, it must have an error.
+ if isFailureEvent && b.Err == nil {
return fmt.Errorf("%w: nil error", ErrInvalidBumpResult)
}
@@ -311,6 +345,10 @@ type TxPublisher struct {
started atomic.Bool
stopped atomic.Bool
+ // Embed the blockbeat consumer struct to get access to the method
+ // `NotifyBlockProcessed` and the `BlockbeatChan`.
+ chainio.BeatConsumer
+
wg sync.WaitGroup
// cfg specifies the configuration of the TxPublisher.
@@ -338,14 +376,22 @@ type TxPublisher struct {
// Compile-time constraint to ensure TxPublisher implements Bumper.
var _ Bumper = (*TxPublisher)(nil)
+// Compile-time check for the chainio.Consumer interface.
+var _ chainio.Consumer = (*TxPublisher)(nil)
+
// NewTxPublisher creates a new TxPublisher.
func NewTxPublisher(cfg TxPublisherConfig) *TxPublisher {
- return &TxPublisher{
+ tp := &TxPublisher{
cfg: &cfg,
records: lnutils.SyncMap[uint64, *monitorRecord]{},
subscriberChans: lnutils.SyncMap[uint64, chan *BumpResult]{},
quit: make(chan struct{}),
}
+
+ // Mount the block consumer.
+ tp.BeatConsumer = chainio.NewBeatConsumer(tp.quit, tp.Name())
+
+ return tp
}
// isNeutrinoBackend checks if the wallet backend is neutrino.
@@ -353,60 +399,69 @@ func (t *TxPublisher) isNeutrinoBackend() bool {
return t.cfg.Wallet.BackEnd() == "neutrino"
}
-// Broadcast is used to publish the tx created from the given inputs. It will,
-// 1. init a fee function based on the given strategy.
-// 2. create an RBF-compliant tx and monitor it for confirmation.
-// 3. notify the initial broadcast result back to the caller.
-// The initial broadcast is guaranteed to be RBF-compliant unless the budget
-// specified cannot cover the fee.
+// Broadcast is used to publish the tx created from the given inputs. It will
+// register the broadcast request and return a chan to the caller to subscribe
+// the broadcast result. The initial broadcast is guaranteed to be
+// RBF-compliant unless the budget specified cannot cover the fee.
//
// NOTE: part of the Bumper interface.
-func (t *TxPublisher) Broadcast(req *BumpRequest) (<-chan *BumpResult, error) {
- log.Tracef("Received broadcast request: %s", lnutils.SpewLogClosure(
- req))
+func (t *TxPublisher) Broadcast(req *BumpRequest) <-chan *BumpResult {
+ log.Tracef("Received broadcast request: %s",
+ lnutils.SpewLogClosure(req))
- // Attempt an initial broadcast which is guaranteed to comply with the
- // RBF rules.
- result, err := t.initialBroadcast(req)
- if err != nil {
- log.Errorf("Initial broadcast failed: %v", err)
-
- return nil, err
- }
+ // Store the request.
+ requestID, record := t.storeInitialRecord(req)
// Create a chan to send the result to the caller.
subscriber := make(chan *BumpResult, 1)
- t.subscriberChans.Store(result.requestID, subscriber)
+ t.subscriberChans.Store(requestID, subscriber)
- // Send the initial broadcast result to the caller.
- t.handleResult(result)
+ // Publish the tx immediately if specified.
+ if req.Immediate {
+ t.handleInitialBroadcast(record, requestID)
+ }
+
+ return subscriber
+}
+
+// storeInitialRecord initializes a monitor record and saves it in the map.
+func (t *TxPublisher) storeInitialRecord(req *BumpRequest) (
+ uint64, *monitorRecord) {
+
+ // Increase the request counter.
+ //
+ // NOTE: this is the only place where we increase the counter.
+ requestID := t.requestCounter.Add(1)
+
+ // Register the record.
+ record := &monitorRecord{req: req}
+ t.records.Store(requestID, record)
+
+ return requestID, record
+}
- return subscriber, nil
+// NOTE: part of the `chainio.Consumer` interface.
+func (t *TxPublisher) Name() string {
+ return "TxPublisher"
}
-// initialBroadcast initializes a fee function, creates an RBF-compliant tx and
-// broadcasts it.
-func (t *TxPublisher) initialBroadcast(req *BumpRequest) (*BumpResult, error) {
+// initializeTx initializes a fee function and creates an RBF-compliant tx. If
+// succeeded, the initial tx is stored in the records map.
+func (t *TxPublisher) initializeTx(requestID uint64, req *BumpRequest) error {
// Create a fee bumping algorithm to be used for future RBF.
feeAlgo, err := t.initializeFeeFunction(req)
if err != nil {
- return nil, fmt.Errorf("init fee function: %w", err)
+ return fmt.Errorf("init fee function: %w", err)
}
// Create the initial tx to be broadcasted. This tx is guaranteed to
// comply with the RBF restrictions.
- requestID, err := t.createRBFCompliantTx(req, feeAlgo)
+ err = t.createRBFCompliantTx(requestID, req, feeAlgo)
if err != nil {
- return nil, fmt.Errorf("create RBF-compliant tx: %w", err)
+ return fmt.Errorf("create RBF-compliant tx: %w", err)
}
- // Broadcast the tx and return the monitored record.
- result, err := t.broadcast(requestID)
- if err != nil {
- return nil, fmt.Errorf("broadcast sweep tx: %w", err)
- }
-
- return result, nil
+ return nil
}
// initializeFeeFunction initializes a fee function to be used for this request
@@ -442,8 +497,8 @@ func (t *TxPublisher) initializeFeeFunction(
// so by creating a tx, validate it using `TestMempoolAccept`, and bump its fee
// and redo the process until the tx is valid, or return an error when non-RBF
// related errors occur or the budget has been used up.
-func (t *TxPublisher) createRBFCompliantTx(req *BumpRequest,
- f FeeFunction) (uint64, error) {
+func (t *TxPublisher) createRBFCompliantTx(requestID uint64, req *BumpRequest,
+ f FeeFunction) error {
for {
// Create a new tx with the given fee rate and check its
@@ -452,17 +507,19 @@ func (t *TxPublisher) createRBFCompliantTx(req *BumpRequest,
switch {
case err == nil:
- // The tx is valid, return the request ID.
- requestID := t.storeRecord(
- sweepCtx.tx, req, f, sweepCtx.fee,
+ // The tx is valid, store it.
+ t.storeRecord(
+ requestID, sweepCtx.tx, req, f, sweepCtx.fee,
+ sweepCtx.outpointToTxIndex,
)
- log.Infof("Created tx %v for %v inputs: feerate=%v, "+
- "fee=%v, inputs=%v", sweepCtx.tx.TxHash(),
- len(req.Inputs), f.FeeRate(), sweepCtx.fee,
+ log.Infof("Created initial sweep tx=%v for %v inputs: "+
+ "feerate=%v, fee=%v, inputs:\n%v",
+ sweepCtx.tx.TxHash(), len(req.Inputs),
+ f.FeeRate(), sweepCtx.fee,
inputTypeSummary(req.Inputs))
- return requestID, nil
+ return nil
// If the error indicates the fees paid is not enough, we will
// ask the fee function to increase the fee rate and retry.
@@ -493,7 +550,7 @@ func (t *TxPublisher) createRBFCompliantTx(req *BumpRequest,
// cluster these inputs differetly.
increased, err = f.Increment()
if err != nil {
- return 0, err
+ return err
}
}
@@ -503,30 +560,24 @@ func (t *TxPublisher) createRBFCompliantTx(req *BumpRequest,
// mempool acceptance.
default:
log.Debugf("Failed to create RBF-compliant tx: %v", err)
- return 0, err
+ return err
}
}
}
// storeRecord stores the given record in the records map.
-func (t *TxPublisher) storeRecord(tx *wire.MsgTx, req *BumpRequest,
- f FeeFunction, fee btcutil.Amount) uint64 {
-
- // Increase the request counter.
- //
- // NOTE: this is the only place where we increase the
- // counter.
- requestID := t.requestCounter.Add(1)
+func (t *TxPublisher) storeRecord(requestID uint64, tx *wire.MsgTx,
+ req *BumpRequest, f FeeFunction, fee btcutil.Amount,
+ outpointToTxIndex map[wire.OutPoint]int) {
// Register the record.
t.records.Store(requestID, &monitorRecord{
- tx: tx,
- req: req,
- feeFunction: f,
- fee: fee,
+ tx: tx,
+ req: req,
+ feeFunction: f,
+ fee: fee,
+ outpointToTxIndex: outpointToTxIndex,
})
-
- return requestID
}
// createAndCheckTx creates a tx based on the given inputs, change output
@@ -656,8 +707,7 @@ func (t *TxPublisher) notifyResult(result *BumpResult) {
return
}
- log.Debugf("Sending result for requestID=%v, tx=%v", id,
- result.Tx.TxHash())
+ log.Debugf("Sending result %v for requestID=%v", result, id)
select {
// Send the result to the subscriber.
@@ -675,20 +725,31 @@ func (t *TxPublisher) notifyResult(result *BumpResult) {
func (t *TxPublisher) removeResult(result *BumpResult) {
id := result.requestID
- // Remove the record from the maps if there's an error. This means this
- // tx has failed its broadcast and cannot be retried. There are two
- // cases,
- // - when the budget cannot cover the fee.
- // - when a non-RBF related error occurs.
+ var txid chainhash.Hash
+ if result.Tx != nil {
+ txid = result.Tx.TxHash()
+ }
+
+ // Remove the record from the maps if there's an error or the tx is
+ // confirmed. When there's an error, it means this tx has failed its
+ // broadcast and cannot be retried. There are two cases it may fail,
+ // - when the budget cannot cover the increased fee calculated by the
+ // fee function, hence the budget is used up.
+ // - when a non-fee related error returned from PublishTransaction.
switch result.Event {
case TxFailed:
log.Errorf("Removing monitor record=%v, tx=%v, due to err: %v",
- id, result.Tx.TxHash(), result.Err)
+ id, txid, result.Err)
case TxConfirmed:
- // Remove the record is the tx is confirmed.
+ // Remove the record if the tx is confirmed.
log.Debugf("Removing confirmed monitor record=%v, tx=%v", id,
- result.Tx.TxHash())
+ txid)
+
+ case TxFatal:
+ // Remove the record if there's an error.
+ log.Debugf("Removing monitor record=%v due to fatal err: %v",
+ id, result.Err)
// Do nothing if it's neither failed or confirmed.
default:
@@ -734,20 +795,18 @@ type monitorRecord struct {
// Start starts the publisher by subscribing to block epoch updates and kicking
// off the monitor loop.
-func (t *TxPublisher) Start() error {
+func (t *TxPublisher) Start(beat chainio.Blockbeat) error {
log.Info("TxPublisher starting...")
if t.started.Swap(true) {
return fmt.Errorf("TxPublisher started more than once")
}
- blockEvent, err := t.cfg.Notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- return fmt.Errorf("register block epoch ntfn: %w", err)
- }
+ // Set the current height.
+ t.currentHeight.Store(beat.Height())
t.wg.Add(1)
- go t.monitor(blockEvent)
+ go t.monitor()
log.Debugf("TxPublisher started")
@@ -775,33 +834,25 @@ func (t *TxPublisher) Stop() error {
// to be bumped. If so, it will attempt to bump the fee of the tx.
//
// NOTE: Must be run as a goroutine.
-func (t *TxPublisher) monitor(blockEvent *chainntnfs.BlockEpochEvent) {
- defer blockEvent.Cancel()
+func (t *TxPublisher) monitor() {
defer t.wg.Done()
for {
select {
- case epoch, ok := <-blockEvent.Epochs:
- if !ok {
- // We should stop the publisher before stopping
- // the chain service. Otherwise it indicates an
- // error.
- log.Error("Block epoch channel closed, exit " +
- "monitor")
-
- return
- }
-
- log.Debugf("TxPublisher received new block: %v",
- epoch.Height)
+ case beat := <-t.BlockbeatChan:
+ height := beat.Height()
+ log.Debugf("TxPublisher received new block: %v", height)
// Update the best known height for the publisher.
- t.currentHeight.Store(epoch.Height)
+ t.currentHeight.Store(height)
// Check all monitored txns to see if any of them needs
// to be bumped.
t.processRecords()
+ // Notify we've processed the block.
+ t.NotifyBlockProcessed(beat, nil)
+
case <-t.quit:
log.Debug("Fee bumper stopped, exit monitor")
return
@@ -816,18 +867,27 @@ func (t *TxPublisher) processRecords() {
// confirmed.
confirmedRecords := make(map[uint64]*monitorRecord)
- // feeBumpRecords stores a map of the records which need to be bumped.
+ // feeBumpRecords stores a map of records which need to be bumped.
feeBumpRecords := make(map[uint64]*monitorRecord)
- // failedRecords stores a map of the records which has inputs being
- // spent by a third party.
+ // failedRecords stores a map of records which has inputs being spent
+ // by a third party.
//
// NOTE: this is only used for neutrino backend.
failedRecords := make(map[uint64]*monitorRecord)
+ // initialRecords stores a map of records which are being created and
+ // published for the first time.
+ initialRecords := make(map[uint64]*monitorRecord)
+
// visitor is a helper closure that visits each record and divides them
// into two groups.
visitor := func(requestID uint64, r *monitorRecord) error {
+ if r.tx == nil {
+ initialRecords[requestID] = r
+ return nil
+ }
+
log.Tracef("Checking monitor recordID=%v for tx=%v", requestID,
r.tx.TxHash())
@@ -855,17 +915,20 @@ func (t *TxPublisher) processRecords() {
return nil
}
- // Iterate through all the records and divide them into two groups.
+ // Iterate through all the records and divide them into four groups.
t.records.ForEach(visitor)
+ // Handle the initial broadcast.
+ for requestID, r := range initialRecords {
+ t.handleInitialBroadcast(r, requestID)
+ }
+
// For records that are confirmed, we'll notify the caller about this
// result.
for requestID, r := range confirmedRecords {
- rec := r
-
log.Debugf("Tx=%v is confirmed", r.tx.TxHash())
t.wg.Add(1)
- go t.handleTxConfirmed(rec, requestID)
+ go t.handleTxConfirmed(r, requestID)
}
// Get the current height to be used in the following goroutines.
@@ -873,22 +936,18 @@ func (t *TxPublisher) processRecords() {
// For records that are not confirmed, we perform a fee bump if needed.
for requestID, r := range feeBumpRecords {
- rec := r
-
log.Debugf("Attempting to fee bump Tx=%v", r.tx.TxHash())
t.wg.Add(1)
- go t.handleFeeBumpTx(requestID, rec, currentHeight)
+ go t.handleFeeBumpTx(requestID, r, currentHeight)
}
// For records that are failed, we'll notify the caller about this
// result.
for requestID, r := range failedRecords {
- rec := r
-
log.Debugf("Tx=%v has inputs been spent by a third party, "+
"failing it now", r.tx.TxHash())
t.wg.Add(1)
- go t.handleThirdPartySpent(rec, requestID)
+ go t.handleThirdPartySpent(r, requestID)
}
}
@@ -913,6 +972,96 @@ func (t *TxPublisher) handleTxConfirmed(r *monitorRecord, requestID uint64) {
t.handleResult(result)
}
+// handleInitialTxError takes the error from `initializeTx` and decides the
+// bump event. It will construct a BumpResult and handles it.
+func (t *TxPublisher) handleInitialTxError(requestID uint64, err error) {
+ // We now decide what type of event to send.
+ var event BumpEvent
+
+ switch {
+ // When the error is due to a dust output, we'll send a TxFailed so
+ // these inputs can be retried with a different group in the next
+ // block.
+ case errors.Is(err, ErrTxNoOutput):
+ event = TxFailed
+
+ // When the error is due to budget being used up, we'll send a TxFailed
+ // so these inputs can be retried with a different group in the next
+ // block.
+ case errors.Is(err, ErrMaxPosition):
+ event = TxFailed
+
+ // When the error is due to zero fee rate delta, we'll send a TxFailed
+ // so these inputs can be retried in the next block.
+ case errors.Is(err, ErrZeroFeeRateDelta):
+ event = TxFailed
+
+ // Otherwise this is not a fee-related error and the tx cannot be
+ // retried. In that case we will fail ALL the inputs in this tx, which
+ // means they will be removed from the sweeper and never be tried
+ // again.
+ //
+ // TODO(yy): Find out which input is causing the failure and fail that
+ // one only.
+ default:
+ event = TxFatal
+ }
+
+ result := &BumpResult{
+ Event: event,
+ Err: err,
+ requestID: requestID,
+ }
+
+ t.handleResult(result)
+}
+
+// handleInitialBroadcast is called when a new request is received. It will
+// handle the initial tx creation and broadcast. In details,
+// 1. init a fee function based on the given strategy.
+// 2. create an RBF-compliant tx and monitor it for confirmation.
+// 3. notify the initial broadcast result back to the caller.
+func (t *TxPublisher) handleInitialBroadcast(r *monitorRecord,
+ requestID uint64) {
+
+ log.Debugf("Initial broadcast for requestID=%v", requestID)
+
+ var (
+ result *BumpResult
+ err error
+ )
+
+ // Attempt an initial broadcast which is guaranteed to comply with the
+ // RBF rules.
+ //
+ // Create the initial tx to be broadcasted.
+ err = t.initializeTx(requestID, r.req)
+ if err != nil {
+ log.Errorf("Initial broadcast failed: %v", err)
+
+ // We now handle the initialization error and exit.
+ t.handleInitialTxError(requestID, err)
+
+ return
+ }
+
+ // Successfully created the first tx, now broadcast it.
+ result, err = t.broadcast(requestID)
+ if err != nil {
+ // The broadcast failed, which can only happen if the tx record
+ // cannot be found or the aux sweeper returns an error. In
+ // either case, we will send back a TxFail event so these
+ // inputs can be retried.
+ result = &BumpResult{
+ Event: TxFailed,
+ Err: err,
+ requestID: requestID,
+ }
+ }
+
+ t.handleResult(result)
+}
+
// handleFeeBumpTx checks if the tx needs to be bumped, and if so, it will
// attempt to bump the fee of the tx.
//
@@ -1439,7 +1588,10 @@ func prepareSweepTx(inputs []input.Input, changePkScript lnwallet.AddrWithKey,
// Check if the lock time has reached
if lt > uint32(currentHeight) {
- return 0, noChange, noLocktime, ErrLocktimeImmature
+ return 0, noChange, noLocktime,
+ fmt.Errorf("%w: current height is %v, "+
+ "locktime is %v", ErrLocktimeImmature,
+ currentHeight, lt)
}
// If another input commits to a different locktime, they
diff --git a/sweep/fee_bumper_test.go b/sweep/fee_bumper_test.go
index 53b38607f7..3e6ffc1c66 100644
--- a/sweep/fee_bumper_test.go
+++ b/sweep/fee_bumper_test.go
@@ -91,6 +91,12 @@ func TestBumpResultValidate(t *testing.T) {
}
require.ErrorIs(t, b.Validate(), ErrInvalidBumpResult)
+ // A fatal event without a failure reason will give an error.
+ b = BumpResult{
+ Event: TxFailed,
+ }
+ require.ErrorIs(t, b.Validate(), ErrInvalidBumpResult)
+
// A confirmed event without fee info will give an error.
b = BumpResult{
Tx: &wire.MsgTx{},
@@ -104,6 +110,20 @@ func TestBumpResultValidate(t *testing.T) {
Event: TxPublished,
}
require.NoError(t, b.Validate())
+
+ // Tx is allowed to be nil in a TxFailed event.
+ b = BumpResult{
+ Event: TxFailed,
+ Err: errDummy,
+ }
+ require.NoError(t, b.Validate())
+
+ // Tx is allowed to be nil in a TxFatal event.
+ b = BumpResult{
+ Event: TxFatal,
+ Err: errDummy,
+ }
+ require.NoError(t, b.Validate())
}
// TestCalcSweepTxWeight checks that the weight of the sweep tx is calculated
@@ -323,19 +343,25 @@ func TestStoreRecord(t *testing.T) {
// Get the current counter and check it's increased later.
initialCounter := tp.requestCounter.Load()
- // Call the method under test.
- requestID := tp.storeRecord(tx, req, feeFunc, fee)
+ op := wire.OutPoint{
+ Hash: chainhash.Hash{1},
+ Index: 0,
+ }
+ utxoIndex := map[wire.OutPoint]int{
+ op: 0,
+ }
- // Check the request ID is as expected.
- require.Equal(t, initialCounter+1, requestID)
+ // Call the method under test.
+ tp.storeRecord(initialCounter, tx, req, feeFunc, fee, utxoIndex)
// Read the saved record and compare.
- record, ok := tp.records.Load(requestID)
+ record, ok := tp.records.Load(initialCounter)
require.True(t, ok)
require.Equal(t, tx, record.tx)
require.Equal(t, feeFunc, record.feeFunction)
require.Equal(t, fee, record.fee)
require.Equal(t, req, record.req)
+ require.Equal(t, utxoIndex, record.outpointToTxIndex)
}
// mockers wraps a list of mocked interfaces used inside tx publisher.
@@ -626,23 +652,19 @@ func TestCreateRBFCompliantTx(t *testing.T) {
},
}
+ var requestCounter atomic.Uint64
for _, tc := range testCases {
tc := tc
+ rid := requestCounter.Add(1)
t.Run(tc.name, func(t *testing.T) {
tc.setupMock()
// Call the method under test.
- id, err := tp.createRBFCompliantTx(req, m.feeFunc)
+ err := tp.createRBFCompliantTx(rid, req, m.feeFunc)
// Check the result is as expected.
require.ErrorIs(t, err, tc.expectedErr)
-
- // If there's an error, expect the requestID to be
- // empty.
- if tc.expectedErr != nil {
- require.Zero(t, id)
- }
})
}
}
@@ -665,9 +687,18 @@ func TestTxPublisherBroadcast(t *testing.T) {
feerate := chainfee.SatPerKWeight(1000)
m.feeFunc.On("FeeRate").Return(feerate)
+ op := wire.OutPoint{
+ Hash: chainhash.Hash{1},
+ Index: 0,
+ }
+ utxoIndex := map[wire.OutPoint]int{
+ op: 0,
+ }
+
// Create a testing record and put it in the map.
fee := btcutil.Amount(1000)
- requestID := tp.storeRecord(tx, req, m.feeFunc, fee)
+ requestID := uint64(1)
+ tp.storeRecord(requestID, tx, req, m.feeFunc, fee, utxoIndex)
// Quickly check when the requestID cannot be found, an error is
// returned.
@@ -754,6 +785,17 @@ func TestRemoveResult(t *testing.T) {
// Create a testing record and put it in the map.
fee := btcutil.Amount(1000)
+ op := wire.OutPoint{
+ Hash: chainhash.Hash{1},
+ Index: 0,
+ }
+ utxoIndex := map[wire.OutPoint]int{
+ op: 0,
+ }
+
+ // Create a test request ID counter.
+ requestCounter := atomic.Uint64{}
+
testCases := []struct {
name string
setupRecord func() uint64
@@ -765,10 +807,13 @@ func TestRemoveResult(t *testing.T) {
// removed.
name: "remove on TxConfirmed",
setupRecord: func() uint64 {
- id := tp.storeRecord(tx, req, m.feeFunc, fee)
- tp.subscriberChans.Store(id, nil)
+ rid := requestCounter.Add(1)
+ tp.storeRecord(
+ rid, tx, req, m.feeFunc, fee, utxoIndex,
+ )
+ tp.subscriberChans.Store(rid, nil)
- return id
+ return rid
},
result: &BumpResult{
Event: TxConfirmed,
@@ -780,10 +825,13 @@ func TestRemoveResult(t *testing.T) {
// When the tx is failed, the records will be removed.
name: "remove on TxFailed",
setupRecord: func() uint64 {
- id := tp.storeRecord(tx, req, m.feeFunc, fee)
- tp.subscriberChans.Store(id, nil)
+ rid := requestCounter.Add(1)
+ tp.storeRecord(
+ rid, tx, req, m.feeFunc, fee, utxoIndex,
+ )
+ tp.subscriberChans.Store(rid, nil)
- return id
+ return rid
},
result: &BumpResult{
Event: TxFailed,
@@ -796,10 +844,13 @@ func TestRemoveResult(t *testing.T) {
// Noop when the tx is neither confirmed or failed.
name: "noop when tx is not confirmed or failed",
setupRecord: func() uint64 {
- id := tp.storeRecord(tx, req, m.feeFunc, fee)
- tp.subscriberChans.Store(id, nil)
+ rid := requestCounter.Add(1)
+ tp.storeRecord(
+ rid, tx, req, m.feeFunc, fee, utxoIndex,
+ )
+ tp.subscriberChans.Store(rid, nil)
- return id
+ return rid
},
result: &BumpResult{
Event: TxPublished,
@@ -844,9 +895,18 @@ func TestNotifyResult(t *testing.T) {
// Create a test tx.
tx := &wire.MsgTx{LockTime: 1}
+ op := wire.OutPoint{
+ Hash: chainhash.Hash{1},
+ Index: 0,
+ }
+ utxoIndex := map[wire.OutPoint]int{
+ op: 0,
+ }
+
// Create a testing record and put it in the map.
fee := btcutil.Amount(1000)
- requestID := tp.storeRecord(tx, req, m.feeFunc, fee)
+ requestID := uint64(1)
+ tp.storeRecord(requestID, tx, req, m.feeFunc, fee, utxoIndex)
// Create a subscription to the event.
subscriber := make(chan *BumpResult, 1)
@@ -894,41 +954,17 @@ func TestNotifyResult(t *testing.T) {
}
}
-// TestBroadcastSuccess checks the public `Broadcast` method can successfully
-// broadcast a tx based on the request.
-func TestBroadcastSuccess(t *testing.T) {
+// TestBroadcast checks the public `Broadcast` method can successfully register
+// a broadcast request.
+func TestBroadcast(t *testing.T) {
t.Parallel()
// Create a publisher using the mocks.
- tp, m := createTestPublisher(t)
+ tp, _ := createTestPublisher(t)
// Create a test feerate.
feerate := chainfee.SatPerKWeight(1000)
- // Mock the fee estimator to return the testing fee rate.
- //
- // We are not testing `NewLinearFeeFunction` here, so the actual params
- // used are irrelevant.
- m.estimator.On("EstimateFeePerKW", mock.Anything).Return(
- feerate, nil).Once()
- m.estimator.On("RelayFeePerKW").Return(chainfee.FeePerKwFloor).Once()
-
- // Mock the signer to always return a valid script.
- //
- // NOTE: we are not testing the utility of creating valid txes here, so
- // this is fine to be mocked. This behaves essentially as skipping the
- // Signer check and alaways assume the tx has a valid sig.
- script := &input.Script{}
- m.signer.On("ComputeInputScript", mock.Anything,
- mock.Anything).Return(script, nil)
-
- // Mock the testmempoolaccept to pass.
- m.wallet.On("CheckMempoolAcceptance", mock.Anything).Return(nil).Once()
-
- // Mock the wallet to publish successfully.
- m.wallet.On("PublishTransaction",
- mock.Anything, mock.Anything).Return(nil).Once()
-
// Create a test request.
inp := createTestInput(1000, input.WitnessKeyHash)
@@ -942,27 +978,24 @@ func TestBroadcastSuccess(t *testing.T) {
}
// Send the req and expect no error.
- resultChan, err := tp.Broadcast(req)
- require.NoError(t, err)
-
- // Check the result is sent back.
- select {
- case <-time.After(time.Second):
- t.Fatal("timeout waiting for subscriber to receive result")
-
- case result := <-resultChan:
- // We expect the first result to be TxPublished.
- require.Equal(t, TxPublished, result.Event)
- }
+ resultChan := tp.Broadcast(req)
+ require.NotNil(t, resultChan)
// Validate the record was stored.
require.Equal(t, 1, tp.records.Len())
require.Equal(t, 1, tp.subscriberChans.Len())
+
+ // Validate the record.
+ rid := tp.requestCounter.Load()
+ record, found := tp.records.Load(rid)
+ require.True(t, found)
+ require.Equal(t, req, record.req)
}
-// TestBroadcastFail checks the public `Broadcast` returns the error or a
-// failed result when the broadcast fails.
-func TestBroadcastFail(t *testing.T) {
+// TestBroadcastImmediate checks the public `Broadcast` method can successfully
+// register a broadcast request and publish the tx when `Immediate` flag is
+// set.
+func TestBroadcastImmediate(t *testing.T) {
t.Parallel()
// Create a publisher using the mocks.
@@ -981,64 +1014,27 @@ func TestBroadcastFail(t *testing.T) {
Budget: btcutil.Amount(1000),
MaxFeeRate: feerate * 10,
DeadlineHeight: 10,
+ Immediate: true,
}
- // Mock the fee estimator to return the testing fee rate.
+ // Mock the fee estimator to return an error.
//
- // We are not testing `NewLinearFeeFunction` here, so the actual params
- // used are irrelevant.
+ // NOTE: We are not testing `handleInitialBroadcast` here, but only
+ // interested in checking that this method is indeed called when
+ // `Immediate` is true. Thus we mock the method to return an error to
+ // quickly abort. As long as this mocked method is called, we know the
+ // `Immediate` flag works.
m.estimator.On("EstimateFeePerKW", mock.Anything).Return(
- feerate, nil).Twice()
- m.estimator.On("RelayFeePerKW").Return(chainfee.FeePerKwFloor).Twice()
-
- // Mock the signer to always return a valid script.
- //
- // NOTE: we are not testing the utility of creating valid txes here, so
- // this is fine to be mocked. This behaves essentially as skipping the
- // Signer check and alaways assume the tx has a valid sig.
- script := &input.Script{}
- m.signer.On("ComputeInputScript", mock.Anything,
- mock.Anything).Return(script, nil)
-
- // Mock the testmempoolaccept to return an error.
- m.wallet.On("CheckMempoolAcceptance",
- mock.Anything).Return(errDummy).Once()
-
- // Send the req and expect an error returned.
- resultChan, err := tp.Broadcast(req)
- require.ErrorIs(t, err, errDummy)
- require.Nil(t, resultChan)
-
- // Validate the record was NOT stored.
- require.Equal(t, 0, tp.records.Len())
- require.Equal(t, 0, tp.subscriberChans.Len())
-
- // Mock the testmempoolaccept again, this time it passes.
- m.wallet.On("CheckMempoolAcceptance", mock.Anything).Return(nil).Once()
-
- // Mock the wallet to fail on publish.
- m.wallet.On("PublishTransaction",
- mock.Anything, mock.Anything).Return(errDummy).Once()
-
- // Send the req and expect no error returned.
- resultChan, err = tp.Broadcast(req)
- require.NoError(t, err)
-
- // Check the result is sent back.
- select {
- case <-time.After(time.Second):
- t.Fatal("timeout waiting for subscriber to receive result")
+ chainfee.SatPerKWeight(0), errDummy).Once()
- case result := <-resultChan:
- // We expect the result to be TxFailed and the error is set in
- // the result.
- require.Equal(t, TxFailed, result.Event)
- require.ErrorIs(t, result.Err, errDummy)
- }
+ // Send the req and expect no error.
+ resultChan := tp.Broadcast(req)
+ require.NotNil(t, resultChan)
- // Validate the record was removed.
- require.Equal(t, 0, tp.records.Len())
- require.Equal(t, 0, tp.subscriberChans.Len())
+ // Validate the record was removed due to an error returned in initial
+ // broadcast.
+ require.Empty(t, tp.records.Len())
+ require.Empty(t, tp.subscriberChans.Len())
}
// TestCreateAnPublishFail checks all the error cases are handled properly in
@@ -1201,9 +1197,18 @@ func TestHandleTxConfirmed(t *testing.T) {
// Create a test tx.
tx := &wire.MsgTx{LockTime: 1}
+ op := wire.OutPoint{
+ Hash: chainhash.Hash{1},
+ Index: 0,
+ }
+ utxoIndex := map[wire.OutPoint]int{
+ op: 0,
+ }
+
// Create a testing record and put it in the map.
fee := btcutil.Amount(1000)
- requestID := tp.storeRecord(tx, req, m.feeFunc, fee)
+ requestID := uint64(1)
+ tp.storeRecord(requestID, tx, req, m.feeFunc, fee, utxoIndex)
record, ok := tp.records.Load(requestID)
require.True(t, ok)
@@ -1273,9 +1278,18 @@ func TestHandleFeeBumpTx(t *testing.T) {
tx: tx,
}
+ op := wire.OutPoint{
+ Hash: chainhash.Hash{1},
+ Index: 0,
+ }
+ utxoIndex := map[wire.OutPoint]int{
+ op: 0,
+ }
+
// Create a testing record and put it in the map.
fee := btcutil.Amount(1000)
- requestID := tp.storeRecord(tx, req, m.feeFunc, fee)
+ requestID := uint64(1)
+ tp.storeRecord(requestID, tx, req, m.feeFunc, fee, utxoIndex)
// Create a subscription to the event.
subscriber := make(chan *BumpResult, 1)
@@ -1476,3 +1490,183 @@ func TestProcessRecords(t *testing.T) {
require.Equal(t, requestID2, result.requestID)
}
}
+
+// TestHandleInitialBroadcastSuccess checks `handleInitialBroadcast` method can
+// successfully broadcast a tx based on the request.
+func TestHandleInitialBroadcastSuccess(t *testing.T) {
+ t.Parallel()
+
+ // Create a publisher using the mocks.
+ tp, m := createTestPublisher(t)
+
+ // Create a test feerate.
+ feerate := chainfee.SatPerKWeight(1000)
+
+ // Mock the fee estimator to return the testing fee rate.
+ //
+ // We are not testing `NewLinearFeeFunction` here, so the actual params
+ // used are irrelevant.
+ m.estimator.On("EstimateFeePerKW", mock.Anything).Return(
+ feerate, nil).Once()
+ m.estimator.On("RelayFeePerKW").Return(chainfee.FeePerKwFloor).Once()
+
+ // Mock the signer to always return a valid script.
+ //
+ // NOTE: we are not testing the utility of creating valid txes here, so
+ // this is fine to be mocked. This behaves essentially as skipping the
+ // Signer check and alaways assume the tx has a valid sig.
+ script := &input.Script{}
+ m.signer.On("ComputeInputScript", mock.Anything,
+ mock.Anything).Return(script, nil)
+
+ // Mock the testmempoolaccept to pass.
+ m.wallet.On("CheckMempoolAcceptance", mock.Anything).Return(nil).Once()
+
+ // Mock the wallet to publish successfully.
+ m.wallet.On("PublishTransaction",
+ mock.Anything, mock.Anything).Return(nil).Once()
+
+ // Create a test request.
+ inp := createTestInput(1000, input.WitnessKeyHash)
+
+ // Create a testing bump request.
+ req := &BumpRequest{
+ DeliveryAddress: changePkScript,
+ Inputs: []input.Input{&inp},
+ Budget: btcutil.Amount(1000),
+ MaxFeeRate: feerate * 10,
+ DeadlineHeight: 10,
+ }
+
+ // Register the testing record use `Broadcast`.
+ resultChan := tp.Broadcast(req)
+
+ // Grab the monitor record from the map.
+ rid := tp.requestCounter.Load()
+ rec, ok := tp.records.Load(rid)
+ require.True(t, ok)
+
+ // Call the method under test.
+ tp.wg.Add(1)
+ tp.handleInitialBroadcast(rec, rid)
+
+ // Check the result is sent back.
+ select {
+ case <-time.After(time.Second):
+ t.Fatal("timeout waiting for subscriber to receive result")
+
+ case result := <-resultChan:
+ // We expect the first result to be TxPublished.
+ require.Equal(t, TxPublished, result.Event)
+ }
+
+ // Validate the record was stored.
+ require.Equal(t, 1, tp.records.Len())
+ require.Equal(t, 1, tp.subscriberChans.Len())
+}
+
+// TestHandleInitialBroadcastFail checks `handleInitialBroadcast` returns the
+// error or a failed result when the broadcast fails.
+func TestHandleInitialBroadcastFail(t *testing.T) {
+ t.Parallel()
+
+ // Create a publisher using the mocks.
+ tp, m := createTestPublisher(t)
+
+ // Create a test feerate.
+ feerate := chainfee.SatPerKWeight(1000)
+
+ // Create a test request.
+ inp := createTestInput(1000, input.WitnessKeyHash)
+
+ // Create a testing bump request.
+ req := &BumpRequest{
+ DeliveryAddress: changePkScript,
+ Inputs: []input.Input{&inp},
+ Budget: btcutil.Amount(1000),
+ MaxFeeRate: feerate * 10,
+ DeadlineHeight: 10,
+ }
+
+ // Mock the fee estimator to return the testing fee rate.
+ //
+ // We are not testing `NewLinearFeeFunction` here, so the actual params
+ // used are irrelevant.
+ m.estimator.On("EstimateFeePerKW", mock.Anything).Return(
+ feerate, nil).Twice()
+ m.estimator.On("RelayFeePerKW").Return(chainfee.FeePerKwFloor).Twice()
+
+ // Mock the signer to always return a valid script.
+ //
+ // NOTE: we are not testing the utility of creating valid txes here, so
+ // this is fine to be mocked. This behaves essentially as skipping the
+ // Signer check and alaways assume the tx has a valid sig.
+ script := &input.Script{}
+ m.signer.On("ComputeInputScript", mock.Anything,
+ mock.Anything).Return(script, nil)
+
+ // Mock the testmempoolaccept to return an error.
+ m.wallet.On("CheckMempoolAcceptance",
+ mock.Anything).Return(errDummy).Once()
+
+ // Register the testing record use `Broadcast`.
+ resultChan := tp.Broadcast(req)
+
+ // Grab the monitor record from the map.
+ rid := tp.requestCounter.Load()
+ rec, ok := tp.records.Load(rid)
+ require.True(t, ok)
+
+ // Call the method under test and expect an error returned.
+ tp.wg.Add(1)
+ tp.handleInitialBroadcast(rec, rid)
+
+ // Check the result is sent back.
+ select {
+ case <-time.After(time.Second):
+ t.Fatal("timeout waiting for subscriber to receive result")
+
+ case result := <-resultChan:
+ // We expect the first result to be TxFatal.
+ require.Equal(t, TxFatal, result.Event)
+ }
+
+ // Validate the record was NOT stored.
+ require.Equal(t, 0, tp.records.Len())
+ require.Equal(t, 0, tp.subscriberChans.Len())
+
+ // Mock the testmempoolaccept again, this time it passes.
+ m.wallet.On("CheckMempoolAcceptance", mock.Anything).Return(nil).Once()
+
+ // Mock the wallet to fail on publish.
+ m.wallet.On("PublishTransaction",
+ mock.Anything, mock.Anything).Return(errDummy).Once()
+
+ // Register the testing record use `Broadcast`.
+ resultChan = tp.Broadcast(req)
+
+ // Grab the monitor record from the map.
+ rid = tp.requestCounter.Load()
+ rec, ok = tp.records.Load(rid)
+ require.True(t, ok)
+
+ // Call the method under test.
+ tp.wg.Add(1)
+ tp.handleInitialBroadcast(rec, rid)
+
+ // Check the result is sent back.
+ select {
+ case <-time.After(time.Second):
+ t.Fatal("timeout waiting for subscriber to receive result")
+
+ case result := <-resultChan:
+ // We expect the result to be TxFailed and the error is set in
+ // the result.
+ require.Equal(t, TxFailed, result.Event)
+ require.ErrorIs(t, result.Err, errDummy)
+ }
+
+ // Validate the record was removed.
+ require.Equal(t, 0, tp.records.Len())
+ require.Equal(t, 0, tp.subscriberChans.Len())
+}
diff --git a/sweep/fee_function.go b/sweep/fee_function.go
index cbf283e37d..15d44ed616 100644
--- a/sweep/fee_function.go
+++ b/sweep/fee_function.go
@@ -14,6 +14,9 @@ var (
// ErrMaxPosition is returned when trying to increase the position of
// the fee function while it's already at its max.
ErrMaxPosition = errors.New("position already at max")
+
+ // ErrZeroFeeRateDelta is returned when the fee rate delta is zero.
+ ErrZeroFeeRateDelta = errors.New("fee rate delta is zero")
)
// mSatPerKWeight represents a fee rate in msat/kw.
@@ -169,7 +172,7 @@ func NewLinearFeeFunction(maxFeeRate chainfee.SatPerKWeight,
"endingFeeRate=%v, width=%v, delta=%v", start, end,
l.width, l.deltaFeeRate)
- return nil, fmt.Errorf("fee rate delta is zero")
+ return nil, ErrZeroFeeRateDelta
}
// Attach the calculated values to the fee function.
diff --git a/sweep/mock_test.go b/sweep/mock_test.go
index 34202b1453..e1ad73d8da 100644
--- a/sweep/mock_test.go
+++ b/sweep/mock_test.go
@@ -268,6 +268,13 @@ func (m *MockInputSet) StartingFeeRate() fn.Option[chainfee.SatPerKWeight] {
return args.Get(0).(fn.Option[chainfee.SatPerKWeight])
}
+// Immediate returns whether the inputs should be swept immediately.
+func (m *MockInputSet) Immediate() bool {
+ args := m.Called()
+
+ return args.Bool(0)
+}
+
// MockBumper is a mock implementation of the interface Bumper.
type MockBumper struct {
mock.Mock
@@ -277,14 +284,14 @@ type MockBumper struct {
var _ Bumper = (*MockBumper)(nil)
// Broadcast broadcasts the transaction to the network.
-func (m *MockBumper) Broadcast(req *BumpRequest) (<-chan *BumpResult, error) {
+func (m *MockBumper) Broadcast(req *BumpRequest) <-chan *BumpResult {
args := m.Called(req)
if args.Get(0) == nil {
- return nil, args.Error(1)
+ return nil
}
- return args.Get(0).(chan *BumpResult), args.Error(1)
+ return args.Get(0).(chan *BumpResult)
}
// MockFeeFunction is a mock implementation of the FeeFunction interface.
diff --git a/sweep/sweeper.go b/sweep/sweeper.go
index 6257faac1f..0d249acc82 100644
--- a/sweep/sweeper.go
+++ b/sweep/sweeper.go
@@ -10,6 +10,7 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
+ "github.com/lightningnetwork/lnd/chainio"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/fn"
"github.com/lightningnetwork/lnd/input"
@@ -222,9 +223,54 @@ func (p *SweeperInput) terminated() bool {
}
}
+// isMature returns a boolean indicating whether the input has a timelock that
+// has been reached or not. The locktime found is also returned.
+func (p *SweeperInput) isMature(currentHeight uint32) (bool, uint32) {
+ locktime, _ := p.RequiredLockTime()
+ if currentHeight < locktime {
+ log.Debugf("Input %v has locktime=%v, current height is %v",
+ p, locktime, currentHeight)
+
+ return false, locktime
+ }
+
+ // If the input has a CSV that's not yet reached, we will skip
+ // this input and wait for the expiry.
+ //
+ // NOTE: We need to consider whether this input can be included in the
+ // next block or not, which means the CSV will be checked against the
+ // currentHeight plus one.
+ locktime = p.BlocksToMaturity() + p.HeightHint()
+ if currentHeight+1 < locktime {
+ log.Debugf("Input %v has CSV expiry=%v, current height is %v, "+
+ "skipped sweeping", p, locktime, currentHeight)
+
+ return false, locktime
+ }
+
+ return true, locktime
+}
+
// InputsMap is a type alias for a set of pending inputs.
type InputsMap = map[wire.OutPoint]*SweeperInput
+// inputsMapToString returns a human readable interpretation of the pending
+// inputs.
+func inputsMapToString(inputs InputsMap) string {
+ inps := make([]input.Input, 0, len(inputs))
+ for _, in := range inputs {
+ inps = append(inps, in)
+ }
+
+ prefix := "\n"
+ if len(inps) == 0 {
+ prefix = ""
+
+ }
+
+ return prefix + inputTypeSummary(inps)
+}
+
// pendingSweepsReq is an internal message we'll use to represent an external
// caller's intent to retrieve all of the pending inputs the UtxoSweeper is
// attempting to sweep.
@@ -280,6 +326,10 @@ type UtxoSweeper struct {
started uint32 // To be used atomically.
stopped uint32 // To be used atomically.
+ // Embed the blockbeat consumer struct to get access to the method
+ // `NotifyBlockProcessed` and the `BlockbeatChan`.
+ chainio.BeatConsumer
+
cfg *UtxoSweeperConfig
newInputs chan *sweepInputMessage
@@ -309,11 +359,14 @@ type UtxoSweeper struct {
// updated whenever a new block epoch is received.
currentHeight int32
- // bumpResultChan is a channel that receives broadcast results from the
+ // bumpRespChan is a channel that receives broadcast results from the
// TxPublisher.
- bumpResultChan chan *BumpResult
+ bumpRespChan chan *bumpResp
}
+// Compile-time check for the chainio.Consumer interface.
+var _ chainio.Consumer = (*UtxoSweeper)(nil)
+
// UtxoSweeperConfig contains dependencies of UtxoSweeper.
type UtxoSweeperConfig struct {
// GenSweepScript generates a P2WKH script belonging to the wallet where
@@ -387,7 +440,7 @@ type sweepInputMessage struct {
// New returns a new Sweeper instance.
func New(cfg *UtxoSweeperConfig) *UtxoSweeper {
- return &UtxoSweeper{
+ s := &UtxoSweeper{
cfg: cfg,
newInputs: make(chan *sweepInputMessage),
spendChan: make(chan *chainntnfs.SpendDetail),
@@ -395,12 +448,17 @@ func New(cfg *UtxoSweeperConfig) *UtxoSweeper {
pendingSweepsReqs: make(chan *pendingSweepsReq),
quit: make(chan struct{}),
inputs: make(InputsMap),
- bumpResultChan: make(chan *BumpResult, 100),
+ bumpRespChan: make(chan *bumpResp, 100),
}
+
+ // Mount the block consumer.
+ s.BeatConsumer = chainio.NewBeatConsumer(s.quit, s.Name())
+
+ return s
}
// Start starts the process of constructing and publish sweep txes.
-func (s *UtxoSweeper) Start() error {
+func (s *UtxoSweeper) Start(beat chainio.Blockbeat) error {
if !atomic.CompareAndSwapUint32(&s.started, 0, 1) {
return nil
}
@@ -411,49 +469,12 @@ func (s *UtxoSweeper) Start() error {
// not change from here on.
s.relayFeeRate = s.cfg.FeeEstimator.RelayFeePerKW()
- // We need to register for block epochs and retry sweeping every block.
- // We should get a notification with the current best block immediately
- // if we don't provide any epoch. We'll wait for that in the collector.
- blockEpochs, err := s.cfg.Notifier.RegisterBlockEpochNtfn(nil)
- if err != nil {
- return fmt.Errorf("register block epoch ntfn: %w", err)
- }
+ // Set the current height.
+ s.currentHeight = beat.Height()
// Start sweeper main loop.
s.wg.Add(1)
- go func() {
- defer blockEpochs.Cancel()
- defer s.wg.Done()
-
- s.collector(blockEpochs.Epochs)
-
- // The collector exited and won't longer handle incoming
- // requests. This can happen on shutdown, when the block
- // notifier shuts down before the sweeper and its clients. In
- // order to not deadlock the clients waiting for their requests
- // being handled, we handle them here and immediately return an
- // error. When the sweeper finally is shut down we can exit as
- // the clients will be notified.
- for {
- select {
- case inp := <-s.newInputs:
- inp.resultChan <- Result{
- Err: ErrSweeperShuttingDown,
- }
-
- case req := <-s.pendingSweepsReqs:
- req.errChan <- ErrSweeperShuttingDown
-
- case req := <-s.updateReqs:
- req.responseChan <- &updateResp{
- err: ErrSweeperShuttingDown,
- }
-
- case <-s.quit:
- return
- }
- }
- }()
+ go s.collector()
return nil
}
@@ -480,6 +501,11 @@ func (s *UtxoSweeper) Stop() error {
return nil
}
+// NOTE: part of the `chainio.Consumer` interface.
+func (s *UtxoSweeper) Name() string {
+ return "UtxoSweeper"
+}
+
// SweepInput sweeps inputs back into the wallet. The inputs will be batched and
// swept after the batch time window ends. A custom fee preference can be
// provided to determine what fee rate should be used for the input. Note that
@@ -502,7 +528,7 @@ func (s *UtxoSweeper) SweepInput(inp input.Input,
}
absoluteTimeLock, _ := inp.RequiredLockTime()
- log.Infof("Sweep request received: out_point=%v, witness_type=%v, "+
+ log.Debugf("Sweep request received: out_point=%v, witness_type=%v, "+
"relative_time_lock=%v, absolute_time_lock=%v, amount=%v, "+
"parent=(%v), params=(%v)", inp.OutPoint(), inp.WitnessType(),
inp.BlocksToMaturity(), absoluteTimeLock,
@@ -611,17 +637,8 @@ func (s *UtxoSweeper) removeConflictSweepDescendants(
// collector is the sweeper main loop. It processes new inputs, spend
// notifications and counts down to publication of the sweep tx.
-func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch) {
- // We registered for the block epochs with a nil request. The notifier
- // should send us the current best block immediately. So we need to wait
- // for it here because we need to know the current best height.
- select {
- case bestBlock := <-blockEpochs:
- s.currentHeight = bestBlock.Height
-
- case <-s.quit:
- return
- }
+func (s *UtxoSweeper) collector() {
+ defer s.wg.Done()
for {
// Clean inputs, which will remove inputs that are swept,
@@ -681,9 +698,9 @@ func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch) {
s.sweepPendingInputs(inputs)
}
- case result := <-s.bumpResultChan:
+ case resp := <-s.bumpRespChan:
// Handle the bump event.
- err := s.handleBumpEvent(result)
+ err := s.handleBumpEvent(resp)
if err != nil {
log.Errorf("Failed to handle bump event: %v",
err)
@@ -691,28 +708,26 @@ func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch) {
// A new block comes in, update the bestHeight, perform a check
// over all pending inputs and publish sweeping txns if needed.
- case epoch, ok := <-blockEpochs:
- if !ok {
- // We should stop the sweeper before stopping
- // the chain service. Otherwise it indicates an
- // error.
- log.Error("Block epoch channel closed")
-
- return
- }
-
+ case beat := <-s.BlockbeatChan:
// Update the sweeper to the best height.
- s.currentHeight = epoch.Height
+ s.currentHeight = beat.Height()
// Update the inputs with the latest height.
inputs := s.updateSweeperInputs()
log.Debugf("Received new block: height=%v, attempt "+
- "sweeping %d inputs", epoch.Height, len(inputs))
+ "sweeping %d inputs:%s", s.currentHeight,
+ len(inputs),
+ lnutils.NewLogClosure(func() string {
+ return inputsMapToString(inputs)
+ }))
// Attempt to sweep any pending inputs.
s.sweepPendingInputs(inputs)
+ // Notify we've processed the block.
+ s.NotifyBlockProcessed(beat, nil)
+
case <-s.quit:
return
}
@@ -827,6 +842,7 @@ func (s *UtxoSweeper) sweep(set InputSet) error {
DeliveryAddress: sweepAddr,
MaxFeeRate: s.cfg.MaxFeeRate.FeePerKWeight(),
StartingFeeRate: set.StartingFeeRate(),
+ Immediate: set.Immediate(),
// TODO(yy): pass the strategy here.
}
@@ -837,27 +853,13 @@ func (s *UtxoSweeper) sweep(set InputSet) error {
// Broadcast will return a read-only chan that we will listen to for
// this publish result and future RBF attempt.
- resp, err := s.cfg.Publisher.Broadcast(req)
- if err != nil {
- outpoints := make([]wire.OutPoint, len(set.Inputs()))
- for i, inp := range set.Inputs() {
- outpoints[i] = inp.OutPoint()
- }
-
- log.Errorf("Initial broadcast failed: %v, inputs=\n%v", err,
- inputTypeSummary(set.Inputs()))
-
- // TODO(yy): find out which input is causing the failure.
- s.markInputsPublishFailed(outpoints)
-
- return err
- }
+ resp := s.cfg.Publisher.Broadcast(req)
// Successfully sent the broadcast attempt, we now handle the result by
// subscribing to the result chan and listen for future updates about
// this tx.
s.wg.Add(1)
- go s.monitorFeeBumpResult(resp)
+ go s.monitorFeeBumpResult(set, resp)
return nil
}
@@ -867,14 +869,14 @@ func (s *UtxoSweeper) sweep(set InputSet) error {
func (s *UtxoSweeper) markInputsPendingPublish(set InputSet) {
// Reschedule sweep.
for _, input := range set.Inputs() {
- pi, ok := s.inputs[input.OutPoint()]
+ op := input.OutPoint()
+ pi, ok := s.inputs[op]
if !ok {
// It could be that this input is an additional wallet
// input that was attached. In that case there also
// isn't a pending input to update.
log.Tracef("Skipped marking input as pending "+
- "published: %v not found in pending inputs",
- input.OutPoint())
+ "published: %v not found in pending inputs", op)
continue
}
@@ -885,8 +887,7 @@ func (s *UtxoSweeper) markInputsPendingPublish(set InputSet) {
// publish.
if pi.terminated() {
log.Errorf("Expect input %v to not have terminated "+
- "state, instead it has %v",
- input.OutPoint, pi.state)
+ "state, instead it has %v", op, pi.state)
continue
}
@@ -901,9 +902,7 @@ func (s *UtxoSweeper) markInputsPendingPublish(set InputSet) {
// markInputsPublished updates the sweeping tx in db and marks the list of
// inputs as published.
-func (s *UtxoSweeper) markInputsPublished(tr *TxRecord,
- inputs []*wire.TxIn) error {
-
+func (s *UtxoSweeper) markInputsPublished(tr *TxRecord, set InputSet) error {
// Mark this tx in db once successfully published.
//
// NOTE: this will behave as an overwrite, which is fine as the record
@@ -915,15 +914,15 @@ func (s *UtxoSweeper) markInputsPublished(tr *TxRecord,
}
// Reschedule sweep.
- for _, input := range inputs {
- pi, ok := s.inputs[input.PreviousOutPoint]
+ for _, input := range set.Inputs() {
+ op := input.OutPoint()
+ pi, ok := s.inputs[op]
if !ok {
// It could be that this input is an additional wallet
// input that was attached. In that case there also
// isn't a pending input to update.
log.Tracef("Skipped marking input as published: %v "+
- "not found in pending inputs",
- input.PreviousOutPoint)
+ "not found in pending inputs", op)
continue
}
@@ -932,8 +931,7 @@ func (s *UtxoSweeper) markInputsPublished(tr *TxRecord,
if pi.state != PendingPublish {
// We may get a Published if this is a replacement tx.
log.Debugf("Expect input %v to have %v, instead it "+
- "has %v", input.PreviousOutPoint,
- PendingPublish, pi.state)
+ "has %v", op, PendingPublish, pi.state)
continue
}
@@ -949,9 +947,10 @@ func (s *UtxoSweeper) markInputsPublished(tr *TxRecord,
}
// markInputsPublishFailed marks the list of inputs as failed to be published.
-func (s *UtxoSweeper) markInputsPublishFailed(outpoints []wire.OutPoint) {
+func (s *UtxoSweeper) markInputsPublishFailed(set InputSet) {
// Reschedule sweep.
- for _, op := range outpoints {
+ for _, inp := range set.Inputs() {
+ op := inp.OutPoint()
pi, ok := s.inputs[op]
if !ok {
// It could be that this input is an additional wallet
@@ -1054,6 +1053,12 @@ func (s *UtxoSweeper) handlePendingSweepsReq(
resps := make(map[wire.OutPoint]*PendingInputResponse, len(s.inputs))
for _, inp := range s.inputs {
+ // Skip immature inputs for compatibility.
+ mature, _ := inp.isMature(uint32(s.currentHeight))
+ if !mature {
+ continue
+ }
+
// Only the exported fields are set, as we expect the response
// to only be consumed externally.
op := inp.OutPoint()
@@ -1189,17 +1194,34 @@ func (s *UtxoSweeper) mempoolLookup(op wire.OutPoint) fn.Option[wire.MsgTx] {
return s.cfg.Mempool.LookupInputMempoolSpend(op)
}
-// handleNewInput processes a new input by registering spend notification and
-// scheduling sweeping for it.
-func (s *UtxoSweeper) handleNewInput(input *sweepInputMessage) error {
+// calculateDefaultDeadline calculates the default deadline height for a sweep
+// request that has no deadline height specified.
+func (s *UtxoSweeper) calculateDefaultDeadline(pi *SweeperInput) int32 {
// Create a default deadline height, which will be used when there's no
// DeadlineHeight specified for a given input.
defaultDeadline := s.currentHeight + int32(s.cfg.NoDeadlineConfTarget)
+ // If the input is immature and has a locktime, we'll use the locktime
+ // height as the starting height.
+ matured, locktime := pi.isMature(uint32(s.currentHeight))
+ if !matured {
+ defaultDeadline = int32(locktime + s.cfg.NoDeadlineConfTarget)
+ log.Debugf("Input %v is immature, using locktime=%v instead "+
+ "of current height=%d as starting height",
+ pi.OutPoint(), locktime, s.currentHeight)
+ }
+
+ return defaultDeadline
+}
+
+// handleNewInput processes a new input by registering spend notification and
+// scheduling sweeping for it.
+func (s *UtxoSweeper) handleNewInput(input *sweepInputMessage) error {
outpoint := input.input.OutPoint()
pi, pending := s.inputs[outpoint]
if pending {
- log.Debugf("Already has pending input %v received", outpoint)
+ log.Infof("Already has pending input %v received, old params: "+
+ "%v, new params %v", outpoint, pi.params, input.params)
s.handleExistingInput(input, pi)
@@ -1220,15 +1242,22 @@ func (s *UtxoSweeper) handleNewInput(input *sweepInputMessage) error {
Input: input.input,
params: input.params,
rbf: rbfInfo,
- // Set the acutal deadline height.
- DeadlineHeight: input.params.DeadlineHeight.UnwrapOr(
- defaultDeadline,
- ),
}
+ // Set the acutal deadline height.
+ pi.DeadlineHeight = input.params.DeadlineHeight.UnwrapOr(
+ s.calculateDefaultDeadline(pi),
+ )
+
s.inputs[outpoint] = pi
log.Tracef("input %v, state=%v, added to inputs", outpoint, pi.state)
+ log.Infof("Registered sweep request at block %d: out_point=%v, "+
+ "witness_type=%v, amount=%v, deadline=%d, state=%v, params=(%v)",
+ s.currentHeight, pi.OutPoint(), pi.WitnessType(),
+ btcutil.Amount(pi.SignDesc().Output.Value), pi.DeadlineHeight,
+ pi.state, pi.params)
+
// Start watching for spend of this input, either by us or the remote
// party.
cancel, err := s.monitorSpend(
@@ -1457,11 +1486,6 @@ func (s *UtxoSweeper) markInputFailed(pi *SweeperInput, err error) {
pi.state = Failed
- // Remove all other inputs in this exclusive group.
- if pi.params.ExclusiveGroup != nil {
- s.removeExclusiveGroup(*pi.params.ExclusiveGroup)
- }
-
s.signalResult(pi, Result{Err: err})
}
@@ -1479,6 +1503,8 @@ func (s *UtxoSweeper) updateSweeperInputs() InputsMap {
// turn this inputs map into a SyncMap in case we wanna add concurrent
// access to the map in the future.
for op, input := range s.inputs {
+ log.Tracef("Checking input: %s, state=%v", input, input.state)
+
// If the input has reached a final state, that it's either
// been swept, or failed, or excluded, we will remove it from
// our sweeper.
@@ -1506,23 +1532,8 @@ func (s *UtxoSweeper) updateSweeperInputs() InputsMap {
// If the input has a locktime that's not yet reached, we will
// skip this input and wait for the locktime to be reached.
- locktime, _ := input.RequiredLockTime()
- if uint32(s.currentHeight) < locktime {
- log.Warnf("Skipping input %v due to locktime=%v not "+
- "reached, current height is %v", op, locktime,
- s.currentHeight)
-
- continue
- }
-
- // If the input has a CSV that's not yet reached, we will skip
- // this input and wait for the expiry.
- locktime = input.BlocksToMaturity() + input.HeightHint()
- if s.currentHeight < int32(locktime)-1 {
- log.Infof("Skipping input %v due to CSV expiry=%v not "+
- "reached, current height is %v", op, locktime,
- s.currentHeight)
-
+ mature, _ := input.isMature(uint32(s.currentHeight))
+ if !mature {
continue
}
@@ -1539,6 +1550,8 @@ func (s *UtxoSweeper) updateSweeperInputs() InputsMap {
// sweepPendingInputs is called when the ticker fires. It will create clusters
// and attempt to create and publish the sweeping transactions.
func (s *UtxoSweeper) sweepPendingInputs(inputs InputsMap) {
+ log.Debugf("Sweeping %v inputs", len(inputs))
+
// Cluster all of our inputs based on the specific Aggregator.
sets := s.cfg.Aggregator.ClusterInputs(inputs)
@@ -1580,11 +1593,24 @@ func (s *UtxoSweeper) sweepPendingInputs(inputs InputsMap) {
}
}
+// bumpResp wraps the result of a bump attempt returned from the fee bumper and
+// the inputs being used.
+type bumpResp struct {
+ // result is the result of the bump attempt returned from the fee
+ // bumper.
+ result *BumpResult
+
+ // set is the input set that was used in the bump attempt.
+ set InputSet
+}
+
// monitorFeeBumpResult subscribes to the passed result chan to listen for
// future updates about the sweeping tx.
//
// NOTE: must run as a goroutine.
-func (s *UtxoSweeper) monitorFeeBumpResult(resultChan <-chan *BumpResult) {
+func (s *UtxoSweeper) monitorFeeBumpResult(set InputSet,
+ resultChan <-chan *BumpResult) {
+
defer s.wg.Done()
for {
@@ -1596,9 +1622,14 @@ func (s *UtxoSweeper) monitorFeeBumpResult(resultChan <-chan *BumpResult) {
continue
}
+ resp := &bumpResp{
+ result: r,
+ set: set,
+ }
+
// Send the result back to the main event loop.
select {
- case s.bumpResultChan <- r:
+ case s.bumpRespChan <- resp:
case <-s.quit:
log.Debug("Sweeper shutting down, skip " +
"sending bump result")
@@ -1613,6 +1644,14 @@ func (s *UtxoSweeper) monitorFeeBumpResult(resultChan <-chan *BumpResult) {
// in sweeper and rely solely on this event to mark
// inputs as Swept?
if r.Event == TxConfirmed || r.Event == TxFailed {
+ // Exit if the tx is failed to be created.
+ if r.Tx == nil {
+ log.Debugf("Received %v for nil tx, "+
+ "exit monitor", r.Event)
+
+ return
+ }
+
log.Debugf("Received %v for sweep tx %v, exit "+
"fee bump monitor", r.Event,
r.Tx.TxHash())
@@ -1634,25 +1673,28 @@ func (s *UtxoSweeper) monitorFeeBumpResult(resultChan <-chan *BumpResult) {
// handleBumpEventTxFailed handles the case where the tx has been failed to
// publish.
-func (s *UtxoSweeper) handleBumpEventTxFailed(r *BumpResult) error {
+func (s *UtxoSweeper) handleBumpEventTxFailed(resp *bumpResp) {
+ r := resp.result
tx, err := r.Tx, r.Err
- log.Errorf("Fee bump attempt failed for tx=%v: %v", tx.TxHash(), err)
-
- outpoints := make([]wire.OutPoint, 0, len(tx.TxIn))
- for _, inp := range tx.TxIn {
- outpoints = append(outpoints, inp.PreviousOutPoint)
+ if tx != nil {
+ log.Warnf("Fee bump attempt failed for tx=%v: %v", tx.TxHash(),
+ err)
}
+ // NOTE: When marking the inputs as failed, we are using the input set
+ // instead of the inputs found in the tx. This is fine for current
+ // version of the sweeper because we always create a tx using ALL of
+ // the inputs specified by the set.
+ //
// TODO(yy): should we also remove the failed tx from db?
- s.markInputsPublishFailed(outpoints)
-
- return err
+ s.markInputsPublishFailed(resp.set)
}
// handleBumpEventTxReplaced handles the case where the sweeping tx has been
// replaced by a new one.
-func (s *UtxoSweeper) handleBumpEventTxReplaced(r *BumpResult) error {
+func (s *UtxoSweeper) handleBumpEventTxReplaced(resp *bumpResp) error {
+ r := resp.result
oldTx := r.ReplacedTx
newTx := r.Tx
@@ -1692,12 +1734,13 @@ func (s *UtxoSweeper) handleBumpEventTxReplaced(r *BumpResult) error {
}
// Mark the inputs as published using the replacing tx.
- return s.markInputsPublished(tr, r.Tx.TxIn)
+ return s.markInputsPublished(tr, resp.set)
}
// handleBumpEventTxPublished handles the case where the sweeping tx has been
// successfully published.
-func (s *UtxoSweeper) handleBumpEventTxPublished(r *BumpResult) error {
+func (s *UtxoSweeper) handleBumpEventTxPublished(resp *bumpResp) error {
+ r := resp.result
tx := r.Tx
tr := &TxRecord{
Txid: tx.TxHash(),
@@ -1707,7 +1750,7 @@ func (s *UtxoSweeper) handleBumpEventTxPublished(r *BumpResult) error {
// Inputs have been successfully published so we update their
// states.
- err := s.markInputsPublished(tr, tx.TxIn)
+ err := s.markInputsPublished(tr, resp.set)
if err != nil {
return err
}
@@ -1723,15 +1766,71 @@ func (s *UtxoSweeper) handleBumpEventTxPublished(r *BumpResult) error {
return nil
}
+// handleBumpEventTxFatal handles the case where there's an unexpected error
+// when creating or publishing the sweeping tx. In this case, the tx will be
+// removed from the sweeper store and the inputs will be marked as `Failed`,
+// which means they will not be retried.
+func (s *UtxoSweeper) handleBumpEventTxFatal(resp *bumpResp) error {
+ r := resp.result
+
+ // Remove the tx from the sweeper store if there is one. Since this is
+ // a broadcast error, it's likely there isn't a tx here.
+ if r.Tx != nil {
+ txid := r.Tx.TxHash()
+ log.Infof("Tx=%v failed with unexpected error: %v", txid, r.Err)
+
+ // Remove the tx from the sweeper db if it exists.
+ if err := s.cfg.Store.DeleteTx(txid); err != nil {
+ return fmt.Errorf("delete tx record for %v: %w", txid,
+ err)
+ }
+ }
+
+ // Mark the inputs as failed.
+ s.markInputsFailed(resp.set, r.Err)
+
+ return nil
+}
+
+// markInputsFailed marks all inputs found in the tx as failed. It will also
+// notify all the subscribers of these inputs.
+func (s *UtxoSweeper) markInputsFailed(set InputSet, err error) {
+ for _, inp := range set.Inputs() {
+ outpoint := inp.OutPoint()
+
+ input, ok := s.inputs[outpoint]
+ if !ok {
+ // It's very likely that a spending tx contains inputs
+ // that we don't know.
+ log.Tracef("Skipped marking input as failed: %v not "+
+ "found in pending inputs", outpoint)
+
+ continue
+ }
+
+ // If the input is already in a terminal state, we don't want
+ // to rewrite it, which also indicates an error as we only get
+ // an error event during the initial broadcast.
+ if input.terminated() {
+ log.Errorf("Skipped marking input=%v as failed due to "+
+ "unexpected state=%v", outpoint, input.state)
+
+ continue
+ }
+
+ s.markInputFailed(input, err)
+ }
+}
+
// handleBumpEvent handles the result sent from the bumper based on its event
// type.
//
// NOTE: TxConfirmed event is not handled, since we already subscribe to the
// input's spending event, we don't need to do anything here.
-func (s *UtxoSweeper) handleBumpEvent(r *BumpResult) error {
- log.Debugf("Received bump event [%v] for tx %v", r.Event, r.Tx.TxHash())
+func (s *UtxoSweeper) handleBumpEvent(r *bumpResp) error {
+ log.Debugf("Received bump result %v", r.result)
- switch r.Event {
+ switch r.result.Event {
// The tx has been published, we update the inputs' state and create a
// record to be stored in the sweeper db.
case TxPublished:
@@ -1739,12 +1838,18 @@ func (s *UtxoSweeper) handleBumpEvent(r *BumpResult) error {
// The tx has failed, we update the inputs' state.
case TxFailed:
- return s.handleBumpEventTxFailed(r)
+ s.handleBumpEventTxFailed(r)
+ return nil
// The tx has been replaced, we will remove the old tx and replace it
// with the new one.
case TxReplaced:
return s.handleBumpEventTxReplaced(r)
+
+ // There's a fatal error in creating the tx, we will remove the tx from
+ // the sweeper db and mark the inputs as failed.
+ case TxFatal:
+ return s.handleBumpEventTxFatal(r)
}
return nil
diff --git a/sweep/sweeper_test.go b/sweep/sweeper_test.go
index 6d9c6c3d2e..415c8240ce 100644
--- a/sweep/sweeper_test.go
+++ b/sweep/sweeper_test.go
@@ -1,6 +1,7 @@
package sweep
import (
+ "crypto/rand"
"errors"
"testing"
"time"
@@ -12,6 +13,7 @@ import (
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/fn"
"github.com/lightningnetwork/lnd/input"
+ "github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/stretchr/testify/mock"
@@ -33,6 +35,41 @@ var (
})
)
+// createMockInput creates a mock input and saves it to the sweeper's inputs
+// map. The created input has the specified state and a random outpoint. It
+// will assert the method `OutPoint` is called at least once.
+func createMockInput(t *testing.T, s *UtxoSweeper,
+ state SweepState) *input.MockInput {
+
+ inp := &input.MockInput{}
+ t.Cleanup(func() {
+ inp.AssertExpectations(t)
+ })
+
+ randBuf := make([]byte, lntypes.HashSize)
+ _, err := rand.Read(randBuf)
+ require.NoError(t, err, "internal error, cannot generate random bytes")
+
+ randHash, err := chainhash.NewHash(randBuf)
+ require.NoError(t, err)
+
+ inp.On("OutPoint").Return(wire.OutPoint{
+ Hash: *randHash,
+ Index: 0,
+ })
+
+ // We don't do branch switches based on the witness type here so we
+ // just mock it.
+ inp.On("WitnessType").Return(input.CommitmentTimeLock).Maybe()
+
+ s.inputs[inp.OutPoint()] = &SweeperInput{
+ Input: inp,
+ state: state,
+ }
+
+ return inp
+}
+
// TestMarkInputsPendingPublish checks that given a list of inputs with
// different states, only the non-terminal state will be marked as `Published`.
func TestMarkInputsPendingPublish(t *testing.T) {
@@ -47,50 +84,21 @@ func TestMarkInputsPendingPublish(t *testing.T) {
set := &MockInputSet{}
defer set.AssertExpectations(t)
- // Create three testing inputs.
- //
- // inputNotExist specifies an input that's not found in the sweeper's
- // `pendingInputs` map.
- inputNotExist := &input.MockInput{}
- defer inputNotExist.AssertExpectations(t)
-
- inputNotExist.On("OutPoint").Return(wire.OutPoint{Index: 0})
-
- // inputInit specifies a newly created input.
- inputInit := &input.MockInput{}
- defer inputInit.AssertExpectations(t)
-
- inputInit.On("OutPoint").Return(wire.OutPoint{Index: 1})
-
- s.inputs[inputInit.OutPoint()] = &SweeperInput{
- state: Init,
- }
-
- // inputPendingPublish specifies an input that's about to be published.
- inputPendingPublish := &input.MockInput{}
- defer inputPendingPublish.AssertExpectations(t)
-
- inputPendingPublish.On("OutPoint").Return(wire.OutPoint{Index: 2})
-
- s.inputs[inputPendingPublish.OutPoint()] = &SweeperInput{
- state: PendingPublish,
- }
-
- // inputTerminated specifies an input that's terminated.
- inputTerminated := &input.MockInput{}
- defer inputTerminated.AssertExpectations(t)
-
- inputTerminated.On("OutPoint").Return(wire.OutPoint{Index: 3})
-
- s.inputs[inputTerminated.OutPoint()] = &SweeperInput{
- state: Excluded,
- }
+ // Create three inputs with different states.
+ // - inputInit specifies a newly created input.
+ // - inputPendingPublish specifies an input about to be published.
+ // - inputTerminated specifies an input that's terminated.
+ var (
+ inputInit = createMockInput(t, s, Init)
+ inputPendingPublish = createMockInput(t, s, PendingPublish)
+ inputTerminated = createMockInput(t, s, Excluded)
+ )
// Mark the test inputs. We expect the non-exist input and the
// inputTerminated to be skipped, and the rest to be marked as pending
// publish.
set.On("Inputs").Return([]input.Input{
- inputNotExist, inputInit, inputPendingPublish, inputTerminated,
+ inputInit, inputPendingPublish, inputTerminated,
})
s.markInputsPendingPublish(set)
@@ -122,36 +130,22 @@ func TestMarkInputsPublished(t *testing.T) {
dummyTR := &TxRecord{}
dummyErr := errors.New("dummy error")
+ // Create a mock input set.
+ set := &MockInputSet{}
+ defer set.AssertExpectations(t)
+
// Create a test sweeper.
s := New(&UtxoSweeperConfig{
Store: mockStore,
})
- // Create three testing inputs.
- //
- // inputNotExist specifies an input that's not found in the sweeper's
- // `inputs` map.
- inputNotExist := &wire.TxIn{
- PreviousOutPoint: wire.OutPoint{Index: 1},
- }
-
- // inputInit specifies a newly created input. When marking this as
- // published, we should see an error log as this input hasn't been
- // published yet.
- inputInit := &wire.TxIn{
- PreviousOutPoint: wire.OutPoint{Index: 2},
- }
- s.inputs[inputInit.PreviousOutPoint] = &SweeperInput{
- state: Init,
- }
-
- // inputPendingPublish specifies an input that's about to be published.
- inputPendingPublish := &wire.TxIn{
- PreviousOutPoint: wire.OutPoint{Index: 3},
- }
- s.inputs[inputPendingPublish.PreviousOutPoint] = &SweeperInput{
- state: PendingPublish,
- }
+ // Create two inputs with different states.
+ // - inputInit specifies a newly created input.
+ // - inputPendingPublish specifies an input about to be published.
+ var (
+ inputInit = createMockInput(t, s, Init)
+ inputPendingPublish = createMockInput(t, s, PendingPublish)
+ )
// First, check that when an error is returned from db, it's properly
// returned here.
@@ -171,9 +165,9 @@ func TestMarkInputsPublished(t *testing.T) {
// Mark the test inputs. We expect the non-exist input and the
// inputInit to be skipped, and the final input to be marked as
// published.
- err = s.markInputsPublished(dummyTR, []*wire.TxIn{
- inputNotExist, inputInit, inputPendingPublish,
- })
+ set.On("Inputs").Return([]input.Input{inputInit, inputPendingPublish})
+
+ err = s.markInputsPublished(dummyTR, set)
require.NoError(err)
// We expect unchanged number of pending inputs.
@@ -181,11 +175,11 @@ func TestMarkInputsPublished(t *testing.T) {
// We expect the init input's state to stay unchanged.
require.Equal(Init,
- s.inputs[inputInit.PreviousOutPoint].state)
+ s.inputs[inputInit.OutPoint()].state)
// We expect the pending-publish input's is now marked as published.
require.Equal(Published,
- s.inputs[inputPendingPublish.PreviousOutPoint].state)
+ s.inputs[inputPendingPublish.OutPoint()].state)
// Assert mocked statements are executed as expected.
mockStore.AssertExpectations(t)
@@ -202,117 +196,75 @@ func TestMarkInputsPublishFailed(t *testing.T) {
// Create a mock sweeper store.
mockStore := NewMockSweeperStore()
+ // Create a mock input set.
+ set := &MockInputSet{}
+ defer set.AssertExpectations(t)
+
// Create a test sweeper.
s := New(&UtxoSweeperConfig{
Store: mockStore,
})
- // Create testing inputs for each state.
- //
- // inputNotExist specifies an input that's not found in the sweeper's
- // `inputs` map.
- inputNotExist := &wire.TxIn{
- PreviousOutPoint: wire.OutPoint{Index: 1},
- }
-
- // inputInit specifies a newly created input. When marking this as
- // published, we should see an error log as this input hasn't been
- // published yet.
- inputInit := &wire.TxIn{
- PreviousOutPoint: wire.OutPoint{Index: 2},
- }
- s.inputs[inputInit.PreviousOutPoint] = &SweeperInput{
- state: Init,
- }
-
- // inputPendingPublish specifies an input that's about to be published.
- inputPendingPublish := &wire.TxIn{
- PreviousOutPoint: wire.OutPoint{Index: 3},
- }
- s.inputs[inputPendingPublish.PreviousOutPoint] = &SweeperInput{
- state: PendingPublish,
- }
-
- // inputPublished specifies an input that's published.
- inputPublished := &wire.TxIn{
- PreviousOutPoint: wire.OutPoint{Index: 4},
- }
- s.inputs[inputPublished.PreviousOutPoint] = &SweeperInput{
- state: Published,
- }
-
- // inputPublishFailed specifies an input that's failed to be published.
- inputPublishFailed := &wire.TxIn{
- PreviousOutPoint: wire.OutPoint{Index: 5},
- }
- s.inputs[inputPublishFailed.PreviousOutPoint] = &SweeperInput{
- state: PublishFailed,
- }
-
- // inputSwept specifies an input that's swept.
- inputSwept := &wire.TxIn{
- PreviousOutPoint: wire.OutPoint{Index: 6},
- }
- s.inputs[inputSwept.PreviousOutPoint] = &SweeperInput{
- state: Swept,
- }
-
- // inputExcluded specifies an input that's excluded.
- inputExcluded := &wire.TxIn{
- PreviousOutPoint: wire.OutPoint{Index: 7},
- }
- s.inputs[inputExcluded.PreviousOutPoint] = &SweeperInput{
- state: Excluded,
- }
-
- // inputFailed specifies an input that's failed.
- inputFailed := &wire.TxIn{
- PreviousOutPoint: wire.OutPoint{Index: 8},
- }
- s.inputs[inputFailed.PreviousOutPoint] = &SweeperInput{
- state: Failed,
- }
+ // Create inputs with different states.
+ // - inputInit specifies a newly created input. When marking this as
+ // published, we should see an error log as this input hasn't been
+ // published yet.
+ // - inputPendingPublish specifies an input about to be published.
+ // - inputPublished specifies an input that's published.
+ // - inputPublishFailed specifies an input that's failed to be
+ // published.
+ // - inputSwept specifies an input that's swept.
+ // - inputExcluded specifies an input that's excluded.
+ // - inputFailed specifies an input that's failed.
+ var (
+ inputInit = createMockInput(t, s, Init)
+ inputPendingPublish = createMockInput(t, s, PendingPublish)
+ inputPublished = createMockInput(t, s, Published)
+ inputPublishFailed = createMockInput(t, s, PublishFailed)
+ inputSwept = createMockInput(t, s, Swept)
+ inputExcluded = createMockInput(t, s, Excluded)
+ inputFailed = createMockInput(t, s, Failed)
+ )
- // Gather all inputs' outpoints.
- pendingOps := make([]wire.OutPoint, 0, len(s.inputs)+1)
- for op := range s.inputs {
- pendingOps = append(pendingOps, op)
- }
- pendingOps = append(pendingOps, inputNotExist.PreviousOutPoint)
+ // Gather all inputs.
+ set.On("Inputs").Return([]input.Input{
+ inputInit, inputPendingPublish, inputPublished,
+ inputPublishFailed, inputSwept, inputExcluded, inputFailed,
+ })
// Mark the test inputs. We expect the non-exist input and the
// inputInit to be skipped, and the final input to be marked as
// published.
- s.markInputsPublishFailed(pendingOps)
+ s.markInputsPublishFailed(set)
// We expect unchanged number of pending inputs.
require.Len(s.inputs, 7)
// We expect the init input's state to stay unchanged.
require.Equal(Init,
- s.inputs[inputInit.PreviousOutPoint].state)
+ s.inputs[inputInit.OutPoint()].state)
// We expect the pending-publish input's is now marked as publish
// failed.
require.Equal(PublishFailed,
- s.inputs[inputPendingPublish.PreviousOutPoint].state)
+ s.inputs[inputPendingPublish.OutPoint()].state)
// We expect the published input's is now marked as publish failed.
require.Equal(PublishFailed,
- s.inputs[inputPublished.PreviousOutPoint].state)
+ s.inputs[inputPublished.OutPoint()].state)
// We expect the publish failed input to stay unchanged.
require.Equal(PublishFailed,
- s.inputs[inputPublishFailed.PreviousOutPoint].state)
+ s.inputs[inputPublishFailed.OutPoint()].state)
// We expect the swept input to stay unchanged.
- require.Equal(Swept, s.inputs[inputSwept.PreviousOutPoint].state)
+ require.Equal(Swept, s.inputs[inputSwept.OutPoint()].state)
// We expect the excluded input to stay unchanged.
- require.Equal(Excluded, s.inputs[inputExcluded.PreviousOutPoint].state)
+ require.Equal(Excluded, s.inputs[inputExcluded.OutPoint()].state)
// We expect the failed input to stay unchanged.
- require.Equal(Failed, s.inputs[inputFailed.PreviousOutPoint].state)
+ require.Equal(Failed, s.inputs[inputFailed.OutPoint()].state)
// Assert mocked statements are executed as expected.
mockStore.AssertExpectations(t)
@@ -491,6 +443,7 @@ func TestUpdateSweeperInputs(t *testing.T) {
// returned.
inp2.On("RequiredLockTime").Return(
uint32(s.currentHeight+1), true).Once()
+ inp2.On("OutPoint").Return(wire.OutPoint{Index: 2}).Maybe()
input7 := &SweeperInput{state: Init, Input: inp2}
// Mock the input to have a CSV expiry in the future so it will NOT be
@@ -499,6 +452,7 @@ func TestUpdateSweeperInputs(t *testing.T) {
uint32(s.currentHeight), false).Once()
inp3.On("BlocksToMaturity").Return(uint32(2)).Once()
inp3.On("HeightHint").Return(uint32(s.currentHeight)).Once()
+ inp3.On("OutPoint").Return(wire.OutPoint{Index: 3}).Maybe()
input8 := &SweeperInput{state: Init, Input: inp3}
// Add the inputs to the sweeper. After the update, we should see the
@@ -704,11 +658,13 @@ func TestSweepPendingInputs(t *testing.T) {
setNeedWallet.On("Budget").Return(btcutil.Amount(1)).Once()
setNeedWallet.On("StartingFeeRate").Return(
fn.None[chainfee.SatPerKWeight]()).Once()
+ setNeedWallet.On("Immediate").Return(false).Once()
normalSet.On("Inputs").Return(nil).Maybe()
normalSet.On("DeadlineHeight").Return(testHeight).Once()
normalSet.On("Budget").Return(btcutil.Amount(1)).Once()
normalSet.On("StartingFeeRate").Return(
fn.None[chainfee.SatPerKWeight]()).Once()
+ normalSet.On("Immediate").Return(false).Once()
// Make pending inputs for testing. We don't need real values here as
// the returned clusters are mocked.
@@ -719,13 +675,8 @@ func TestSweepPendingInputs(t *testing.T) {
setNeedWallet, normalSet,
})
- // Mock `Broadcast` to return an error. This should cause the
- // `createSweepTx` inside `sweep` to fail. This is done so we can
- // terminate the method early as we are only interested in testing the
- // workflow in `sweepPendingInputs`. We don't need to test `sweep` here
- // as it should be tested in its own unit test.
- dummyErr := errors.New("dummy error")
- publisher.On("Broadcast", mock.Anything).Return(nil, dummyErr).Twice()
+ // Mock `Broadcast` to return a result.
+ publisher.On("Broadcast", mock.Anything).Return(nil).Twice()
// Call the method under test.
s.sweepPendingInputs(pis)
@@ -736,33 +687,33 @@ func TestSweepPendingInputs(t *testing.T) {
func TestHandleBumpEventTxFailed(t *testing.T) {
t.Parallel()
+ // Create a mock input set.
+ set := &MockInputSet{}
+ defer set.AssertExpectations(t)
+
// Create a test sweeper.
s := New(&UtxoSweeperConfig{})
- var (
- // Create four testing outpoints.
- op1 = wire.OutPoint{Hash: chainhash.Hash{1}}
- op2 = wire.OutPoint{Hash: chainhash.Hash{2}}
- op3 = wire.OutPoint{Hash: chainhash.Hash{3}}
- opNotExist = wire.OutPoint{Hash: chainhash.Hash{4}}
- )
+ // inputNotExist specifies an input that's not found in the sweeper's
+ // `pendingInputs` map.
+ inputNotExist := &input.MockInput{}
+ defer inputNotExist.AssertExpectations(t)
+ inputNotExist.On("OutPoint").Return(wire.OutPoint{Index: 0})
+ opNotExist := inputNotExist.OutPoint()
// Create three mock inputs.
- input1 := &input.MockInput{}
- defer input1.AssertExpectations(t)
-
- input2 := &input.MockInput{}
- defer input2.AssertExpectations(t)
+ var (
+ input1 = createMockInput(t, s, PendingPublish)
+ input2 = createMockInput(t, s, PendingPublish)
+ input3 = createMockInput(t, s, PendingPublish)
+ )
- input3 := &input.MockInput{}
- defer input3.AssertExpectations(t)
+ op1 := input1.OutPoint()
+ op2 := input2.OutPoint()
+ op3 := input3.OutPoint()
// Construct the initial state for the sweeper.
- s.inputs = InputsMap{
- op1: &SweeperInput{Input: input1, state: PendingPublish},
- op2: &SweeperInput{Input: input2, state: PendingPublish},
- op3: &SweeperInput{Input: input3, state: PendingPublish},
- }
+ set.On("Inputs").Return([]input.Input{input1, input2, input3})
// Create a testing tx that spends the first two inputs.
tx := &wire.MsgTx{
@@ -780,16 +731,26 @@ func TestHandleBumpEventTxFailed(t *testing.T) {
Err: errDummy,
}
+ // Create a testing bump response.
+ resp := &bumpResp{
+ result: br,
+ set: set,
+ }
+
// Call the method under test.
- err := s.handleBumpEvent(br)
- require.ErrorIs(t, err, errDummy)
+ err := s.handleBumpEvent(resp)
+ require.NoError(t, err)
// Assert the states of the first two inputs are updated.
require.Equal(t, PublishFailed, s.inputs[op1].state)
require.Equal(t, PublishFailed, s.inputs[op2].state)
- // Assert the state of the third input is not updated.
- require.Equal(t, PendingPublish, s.inputs[op3].state)
+ // Assert the state of the third input.
+ //
+ // NOTE: Although the tx doesn't spend it, we still mark this input as
+ // failed as we are treating the input set as the single source of
+ // truth.
+ require.Equal(t, PublishFailed, s.inputs[op3].state)
// Assert the non-existing input is not added to the pending inputs.
require.NotContains(t, s.inputs, opNotExist)
@@ -808,23 +769,21 @@ func TestHandleBumpEventTxReplaced(t *testing.T) {
wallet := &MockWallet{}
defer wallet.AssertExpectations(t)
+ // Create a mock input set.
+ set := &MockInputSet{}
+ defer set.AssertExpectations(t)
+
// Create a test sweeper.
s := New(&UtxoSweeperConfig{
Store: store,
Wallet: wallet,
})
- // Create a testing outpoint.
- op := wire.OutPoint{Hash: chainhash.Hash{1}}
-
// Create a mock input.
- inp := &input.MockInput{}
- defer inp.AssertExpectations(t)
+ inp := createMockInput(t, s, PendingPublish)
+ set.On("Inputs").Return([]input.Input{inp})
- // Construct the initial state for the sweeper.
- s.inputs = InputsMap{
- op: &SweeperInput{Input: inp, state: PendingPublish},
- }
+ op := inp.OutPoint()
// Create a testing tx that spends the input.
tx := &wire.MsgTx{
@@ -849,12 +808,18 @@ func TestHandleBumpEventTxReplaced(t *testing.T) {
Event: TxReplaced,
}
+ // Create a testing bump response.
+ resp := &bumpResp{
+ result: br,
+ set: set,
+ }
+
// Mock the store to return an error.
dummyErr := errors.New("dummy error")
store.On("GetTx", tx.TxHash()).Return(nil, dummyErr).Once()
// Call the method under test and assert the error is returned.
- err := s.handleBumpEventTxReplaced(br)
+ err := s.handleBumpEventTxReplaced(resp)
require.ErrorIs(t, err, dummyErr)
// Mock the store to return the old tx record.
@@ -869,7 +834,7 @@ func TestHandleBumpEventTxReplaced(t *testing.T) {
store.On("DeleteTx", tx.TxHash()).Return(dummyErr).Once()
// Call the method under test and assert the error is returned.
- err = s.handleBumpEventTxReplaced(br)
+ err = s.handleBumpEventTxReplaced(resp)
require.ErrorIs(t, err, dummyErr)
// Mock the store to return the old tx record and delete it without
@@ -889,7 +854,7 @@ func TestHandleBumpEventTxReplaced(t *testing.T) {
wallet.On("CancelRebroadcast", tx.TxHash()).Once()
// Call the method under test.
- err = s.handleBumpEventTxReplaced(br)
+ err = s.handleBumpEventTxReplaced(resp)
require.NoError(t, err)
// Assert the state of the input is updated.
@@ -905,22 +870,20 @@ func TestHandleBumpEventTxPublished(t *testing.T) {
store := &MockSweeperStore{}
defer store.AssertExpectations(t)
+ // Create a mock input set.
+ set := &MockInputSet{}
+ defer set.AssertExpectations(t)
+
// Create a test sweeper.
s := New(&UtxoSweeperConfig{
Store: store,
})
- // Create a testing outpoint.
- op := wire.OutPoint{Hash: chainhash.Hash{1}}
-
// Create a mock input.
- inp := &input.MockInput{}
- defer inp.AssertExpectations(t)
+ inp := createMockInput(t, s, PendingPublish)
+ set.On("Inputs").Return([]input.Input{inp})
- // Construct the initial state for the sweeper.
- s.inputs = InputsMap{
- op: &SweeperInput{Input: inp, state: PendingPublish},
- }
+ op := inp.OutPoint()
// Create a testing tx that spends the input.
tx := &wire.MsgTx{
@@ -936,6 +899,12 @@ func TestHandleBumpEventTxPublished(t *testing.T) {
Event: TxPublished,
}
+ // Create a testing bump response.
+ resp := &bumpResp{
+ result: br,
+ set: set,
+ }
+
// Mock the store to save the new tx record.
store.On("StoreTx", &TxRecord{
Txid: tx.TxHash(),
@@ -943,7 +912,7 @@ func TestHandleBumpEventTxPublished(t *testing.T) {
}).Return(nil).Once()
// Call the method under test.
- err := s.handleBumpEventTxPublished(br)
+ err := s.handleBumpEventTxPublished(resp)
require.NoError(t, err)
// Assert the state of the input is updated.
@@ -961,25 +930,21 @@ func TestMonitorFeeBumpResult(t *testing.T) {
wallet := &MockWallet{}
defer wallet.AssertExpectations(t)
+ // Create a mock input set.
+ set := &MockInputSet{}
+ defer set.AssertExpectations(t)
+
// Create a test sweeper.
s := New(&UtxoSweeperConfig{
Store: store,
Wallet: wallet,
})
- // Create a testing outpoint.
- op := wire.OutPoint{Hash: chainhash.Hash{1}}
-
// Create a mock input.
- inp := &input.MockInput{}
- defer inp.AssertExpectations(t)
-
- // Construct the initial state for the sweeper.
- s.inputs = InputsMap{
- op: &SweeperInput{Input: inp, state: PendingPublish},
- }
+ inp := createMockInput(t, s, PendingPublish)
// Create a testing tx that spends the input.
+ op := inp.OutPoint()
tx := &wire.MsgTx{
LockTime: 1,
TxIn: []*wire.TxIn{
@@ -1058,7 +1023,8 @@ func TestMonitorFeeBumpResult(t *testing.T) {
return resultChan
},
shouldExit: false,
- }, {
+ },
+ {
// When the sweeper is shutting down, the monitor loop
// should exit.
name: "exit on sweeper shutdown",
@@ -1085,7 +1051,7 @@ func TestMonitorFeeBumpResult(t *testing.T) {
s.wg.Add(1)
go func() {
- s.monitorFeeBumpResult(resultChan)
+ s.monitorFeeBumpResult(set, resultChan)
close(done)
}()
@@ -1111,3 +1077,125 @@ func TestMonitorFeeBumpResult(t *testing.T) {
})
}
}
+
+// TestMarkInputsFailed checks that given a list of inputs with different
+// states, the method `markInputsFailed` correctly marks the inputs as failed.
+func TestMarkInputsFailed(t *testing.T) {
+ t.Parallel()
+
+ require := require.New(t)
+
+ // Create a mock input set.
+ set := &MockInputSet{}
+ defer set.AssertExpectations(t)
+
+ // Create a test sweeper.
+ s := New(&UtxoSweeperConfig{})
+
+ // Create testing inputs for each state.
+ // - inputInit specifies a newly created input. When marking this as
+ // published, we should see an error log as this input hasn't been
+ // published yet.
+ // - inputPendingPublish specifies an input about to be published.
+ // - inputPublished specifies an input that's published.
+ // - inputPublishFailed specifies an input that's failed to be
+ // published.
+ // - inputSwept specifies an input that's swept.
+ // - inputExcluded specifies an input that's excluded.
+ // - inputFailed specifies an input that's failed.
+ var (
+ inputInit = createMockInput(t, s, Init)
+ inputPendingPublish = createMockInput(t, s, PendingPublish)
+ inputPublished = createMockInput(t, s, Published)
+ inputPublishFailed = createMockInput(t, s, PublishFailed)
+ inputSwept = createMockInput(t, s, Swept)
+ inputExcluded = createMockInput(t, s, Excluded)
+ inputFailed = createMockInput(t, s, Failed)
+ )
+
+ // Gather all inputs.
+ set.On("Inputs").Return([]input.Input{
+ inputInit, inputPendingPublish, inputPublished,
+ inputPublishFailed, inputSwept, inputExcluded, inputFailed,
+ })
+
+ // Mark the test inputs. We expect the non-exist input and
+ // inputSwept/inputExcluded/inputFailed to be skipped.
+ s.markInputsFailed(set, errDummy)
+
+ // We expect unchanged number of pending inputs.
+ require.Len(s.inputs, 7)
+
+ // We expect the init input's to be marked as failed.
+ require.Equal(Failed, s.inputs[inputInit.OutPoint()].state)
+
+ // We expect the pending-publish input to be marked as failed.
+ require.Equal(Failed, s.inputs[inputPendingPublish.OutPoint()].state)
+
+ // We expect the published input to be marked as failed.
+ require.Equal(Failed, s.inputs[inputPublished.OutPoint()].state)
+
+ // We expect the publish failed input to be markd as failed.
+ require.Equal(Failed, s.inputs[inputPublishFailed.OutPoint()].state)
+
+ // We expect the swept input to stay unchanged.
+ require.Equal(Swept, s.inputs[inputSwept.OutPoint()].state)
+
+ // We expect the excluded input to stay unchanged.
+ require.Equal(Excluded, s.inputs[inputExcluded.OutPoint()].state)
+
+ // We expect the failed input to stay unchanged.
+ require.Equal(Failed, s.inputs[inputFailed.OutPoint()].state)
+}
+
+// TestHandleBumpEventTxFatal checks that `handleBumpEventTxFatal` correctly
+// handles a `TxFatal` event.
+func TestHandleBumpEventTxFatal(t *testing.T) {
+ t.Parallel()
+
+ rt := require.New(t)
+
+ // Create a mock store.
+ store := &MockSweeperStore{}
+ defer store.AssertExpectations(t)
+
+ // Create a mock input set. We are not testing `markInputFailed` here,
+ // so the actual set doesn't matter.
+ set := &MockInputSet{}
+ defer set.AssertExpectations(t)
+ set.On("Inputs").Return(nil)
+
+ // Create a test sweeper.
+ s := New(&UtxoSweeperConfig{
+ Store: store,
+ })
+
+ // Create a dummy tx.
+ tx := &wire.MsgTx{
+ LockTime: 1,
+ }
+
+ // Create a testing bump response.
+ result := &BumpResult{
+ Err: errDummy,
+ Tx: tx,
+ }
+ resp := &bumpResp{
+ result: result,
+ set: set,
+ }
+
+ // Mock the store to return an error.
+ store.On("DeleteTx", mock.Anything).Return(errDummy).Once()
+
+ // Call the method under test and assert the error is returned.
+ err := s.handleBumpEventTxFatal(resp)
+ rt.ErrorIs(err, errDummy)
+
+ // Mock the store to return nil.
+ store.On("DeleteTx", mock.Anything).Return(nil).Once()
+
+ // Call the method under test and assert no error is returned.
+ err = s.handleBumpEventTxFatal(resp)
+ rt.NoError(err)
+}
diff --git a/sweep/tx_input_set.go b/sweep/tx_input_set.go
index ce144a8eb3..73054bdf5e 100644
--- a/sweep/tx_input_set.go
+++ b/sweep/tx_input_set.go
@@ -64,6 +64,13 @@ type InputSet interface {
// StartingFeeRate returns the max starting fee rate found in the
// inputs.
StartingFeeRate() fn.Option[chainfee.SatPerKWeight]
+
+ // Immediate returns a boolean to indicate whether the tx made from
+ // this input set should be published immediately.
+ //
+ // TODO(yy): create a new method `Params` to combine the informational
+ // methods DeadlineHeight, Budget, StartingFeeRate and Immediate.
+ Immediate() bool
}
// createWalletTxInput converts a wallet utxo into an object that can be added
@@ -414,3 +421,18 @@ func (b *BudgetInputSet) StartingFeeRate() fn.Option[chainfee.SatPerKWeight] {
return startingFeeRate
}
+
+// Immediate returns whether the inputs should be swept immediately.
+//
+// NOTE: part of the InputSet interface.
+func (b *BudgetInputSet) Immediate() bool {
+ for _, inp := range b.inputs {
+ // As long as one of the inputs is immediate, the whole set is
+ // immediate.
+ if inp.params.Immediate {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/sweep/txgenerator.go b/sweep/txgenerator.go
index 993ee9e59d..a1f77c56d9 100644
--- a/sweep/txgenerator.go
+++ b/sweep/txgenerator.go
@@ -315,5 +315,5 @@ func inputTypeSummary(inputs []input.Input) string {
part := fmt.Sprintf("%v (%v)", i.OutPoint(), i.WitnessType())
parts = append(parts, part)
}
- return strings.Join(parts, ", ")
+ return strings.Join(parts, "\n")
}
diff --git a/watchtower/wtdb/version.go b/watchtower/wtdb/version.go
index dd9c554723..7267c4d797 100644
--- a/watchtower/wtdb/version.go
+++ b/watchtower/wtdb/version.go
@@ -78,6 +78,12 @@ func getLatestDBVersion(versions []version) uint32 {
return uint32(len(versions))
}
+// LatestDBMigrationVersion returns the number of the latest existing database
+// migration version available.
+func LatestDBMigrationVersion() uint32 {
+ return getLatestDBVersion(clientDBVersions)
+}
+
// getMigrations returns a slice of all updates with a greater number that
// curVersion that need to be applied to sync up with the latest version.
func getMigrations(versions []version, curVersion uint32) []version {
@@ -91,6 +97,27 @@ func getMigrations(versions []version, curVersion uint32) []version {
return updates
}
+// CurrentDatabaseVersion reads the current database version from the database
+// and returns it.
+func CurrentDatabaseVersion(db kvdb.Backend) (uint32, error) {
+ var (
+ version uint32
+ err error
+ )
+
+ err = kvdb.View(db, func(tx kvdb.RTx) error {
+ version, err = getDBVersion(tx)
+ return err
+ }, func() {
+ version = 0
+ })
+ if err != nil {
+ return 0, err
+ }
+
+ return version, nil
+}
+
// getDBVersion retrieves the current database version from the metadata bucket
// using the dbVersionKey.
func getDBVersion(tx kvdb.RTx) (uint32, error) {
diff --git a/watchtower/wtwire/fuzz_test.go b/watchtower/wtwire/fuzz_test.go
index 5a03b84e3e..385e5a58bf 100644
--- a/watchtower/wtwire/fuzz_test.go
+++ b/watchtower/wtwire/fuzz_test.go
@@ -18,25 +18,28 @@ func prefixWithMsgType(data []byte, prefix MessageType) []byte {
return data
}
-// harness performs the actual fuzz testing of the appropriate wire message.
-// This function will check that the passed-in message passes wire length
-// checks, is a valid message once deserialized, and passes a sequence of
-// serialization and deserialization checks. Returns an int that determines
-// whether the input is unique or not.
-func harness(t *testing.T, data []byte, emptyMsg Message) {
+// wireMsgHarness performs the actual fuzz testing of the appropriate wire
+// message. This function will check that the passed-in message passes wire
+// length checks, is a valid message once deserialized, and passes a sequence of
+// serialization and deserialization checks. emptyMsg must be an empty Message
+// of the type to be fuzzed, as it is used to determine the appropriate prefix
+// bytes and max payload length for decoding.
+func wireMsgHarness(t *testing.T, data []byte, emptyMsg Message) {
t.Helper()
- // Create a reader with the byte array.
- r := bytes.NewReader(data)
-
- // Make sure byte array length (excluding 2 bytes for message type) is
- // less than max payload size for the wire message.
- payloadLen := uint32(len(data)) - 2
+ // Make sure byte array length is less than max payload size for the
+ // wire message.
+ payloadLen := uint32(len(data))
if payloadLen > emptyMsg.MaxPayloadLength(0) {
// Ignore this input - max payload constraint violated.
return
}
+ data = prefixWithMsgType(data, emptyMsg.MsgType())
+
+ // Create a reader with the byte array.
+ r := bytes.NewReader(data)
+
msg, err := ReadMessage(r, 0)
if err != nil {
return
@@ -57,120 +60,48 @@ func harness(t *testing.T, data []byte, emptyMsg Message) {
func FuzzCreateSessionReply(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
- // Prefix with MsgCreateSessionReply.
- data = prefixWithMsgType(data, MsgCreateSessionReply)
-
- // Create an empty message so that the FuzzHarness func can
- // check if the max payload constraint is violated.
- emptyMsg := CreateSessionReply{}
-
- // Pass the message into our general fuzz harness for wire
- // messages!
- harness(t, data, &emptyMsg)
+ wireMsgHarness(t, data, &CreateSessionReply{})
})
}
func FuzzCreateSession(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
- // Prefix with MsgCreateSession.
- data = prefixWithMsgType(data, MsgCreateSession)
-
- // Create an empty message so that the FuzzHarness func can
- // check if the max payload constraint is violated.
- emptyMsg := CreateSession{}
-
- // Pass the message into our general fuzz harness for wire
- // messages!
- harness(t, data, &emptyMsg)
+ wireMsgHarness(t, data, &CreateSession{})
})
}
func FuzzDeleteSessionReply(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
- // Prefix with MsgDeleteSessionReply.
- data = prefixWithMsgType(data, MsgDeleteSessionReply)
-
- // Create an empty message so that the FuzzHarness func can
- // check if the max payload constraint is violated.
- emptyMsg := DeleteSessionReply{}
-
- // Pass the message into our general fuzz harness for wire
- // messages!
- harness(t, data, &emptyMsg)
+ wireMsgHarness(t, data, &DeleteSessionReply{})
})
}
func FuzzDeleteSession(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
- // Prefix with MsgDeleteSession.
- data = prefixWithMsgType(data, MsgDeleteSession)
-
- // Create an empty message so that the FuzzHarness func can
- // check if the max payload constraint is violated.
- emptyMsg := DeleteSession{}
-
- // Pass the message into our general fuzz harness for wire
- // messages!
- harness(t, data, &emptyMsg)
+ wireMsgHarness(t, data, &DeleteSession{})
})
}
func FuzzError(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
- // Prefix with MsgError.
- data = prefixWithMsgType(data, MsgError)
-
- // Create an empty message so that the FuzzHarness func can
- // check if the max payload constraint is violated.
- emptyMsg := Error{}
-
- // Pass the message into our general fuzz harness for wire
- // messages!
- harness(t, data, &emptyMsg)
+ wireMsgHarness(t, data, &Error{})
})
}
func FuzzInit(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
- // Prefix with MsgInit.
- data = prefixWithMsgType(data, MsgInit)
-
- // Create an empty message so that the FuzzHarness func can
- // check if the max payload constraint is violated.
- emptyMsg := Init{}
-
- // Pass the message into our general fuzz harness for wire
- // messages!
- harness(t, data, &emptyMsg)
+ wireMsgHarness(t, data, &Init{})
})
}
func FuzzStateUpdateReply(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
- // Prefix with MsgStateUpdateReply.
- data = prefixWithMsgType(data, MsgStateUpdateReply)
-
- // Create an empty message so that the FuzzHarness func can
- // check if the max payload constraint is violated.
- emptyMsg := StateUpdateReply{}
-
- // Pass the message into our general fuzz harness for wire
- // messages!
- harness(t, data, &emptyMsg)
+ wireMsgHarness(t, data, &StateUpdateReply{})
})
}
func FuzzStateUpdate(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
- // Prefix with MsgStateUpdate.
- data = prefixWithMsgType(data, MsgStateUpdate)
-
- // Create an empty message so that the FuzzHarness func can
- // check if the max payload constraint is violated.
- emptyMsg := StateUpdate{}
-
- // Pass the message into our general fuzz harness for wire
- // messages!
- harness(t, data, &emptyMsg)
+ wireMsgHarness(t, data, &StateUpdate{})
})
}