Skip to content

Commit

Permalink
CCIP-1730 Implementing Healthy function in LogPoller (#584)
Browse files Browse the repository at this point in the history
  • Loading branch information
mateusz-sekara authored Mar 7, 2024
1 parent bebb0b5 commit 9f70ea4
Show file tree
Hide file tree
Showing 4 changed files with 125 additions and 0 deletions.
4 changes: 4 additions & 0 deletions core/chains/evm/logpoller/disabled.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,10 @@ func (disabled) Name() string { return "disabledLogPoller" }

// Start never succeeds on the disabled poller: it reports ErrDisabled.
func (disabled) Start(ctx context.Context) error {
	return ErrDisabled
}

// Healthy always returns ErrDisabled: a disabled log poller is never healthy.
// One-line form keeps it consistent with the sibling Start/Close/Ready methods.
func (disabled) Healthy() error { return ErrDisabled }

// Close is rejected with ErrDisabled; there is nothing to shut down.
func (disabled) Close() error {
	return ErrDisabled
}

// Ready reports ErrDisabled: the disabled poller never becomes ready.
func (disabled) Ready() error {
	return ErrDisabled
}
Expand Down
21 changes: 21 additions & 0 deletions core/chains/evm/logpoller/log_poller.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
"sort"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/ethereum/go-ethereum"
Expand All @@ -34,6 +35,7 @@ import (
//go:generate mockery --quiet --name LogPoller --output ./mocks/ --case=underscore --structname LogPoller --filename log_poller.go
type LogPoller interface {
services.Service
Healthy() error
Replay(ctx context.Context, fromBlock int64) error
ReplayAsync(fromBlock int64)
RegisterFilter(filter Filter, qopts ...pg.QOpt) error
Expand Down Expand Up @@ -70,6 +72,8 @@ const (
Unconfirmed = Confirmations(0)
)

var ErrFinalityViolated = errors.New("finality violated")

type LogPollerTest interface {
LogPoller
PollAndSaveLogs(ctx context.Context, currentBlockNumber int64)
Expand Down Expand Up @@ -118,6 +122,12 @@ type logPoller struct {
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
// This flag is raised whenever the log poller detects that the chain's finality has been violated.
// It can happen when reorg is deeper than the latest finalized block that LogPoller saw in a previous PollAndSave tick.
// Usually the only way to recover is to manually remove the offending logs and block from the database.
// LogPoller keeps running in infinite loop, so whenever the invalid state is removed from the database it should
// recover automatically without needing to restart the LogPoller.
finalityViolated *atomic.Bool
}

// NewLogPoller creates a log poller. Note there is an assumption
Expand Down Expand Up @@ -149,6 +159,7 @@ func NewLogPoller(orm ORM, ec Client, lggr logger.Logger, pollPeriod time.Durati
keepFinalizedBlocksDepth: keepFinalizedBlocksDepth,
filters: make(map[string]Filter),
filterDirty: true, // Always build Filter on first call to cache an empty filter if nothing registered yet.
finalityViolated: new(atomic.Bool),
}
}

Expand Down Expand Up @@ -203,6 +214,13 @@ func (filter *Filter) Contains(other *Filter) bool {
return true
}

// Healthy reports whether the LogPoller is in a usable state. It returns
// ErrFinalityViolated once a reorg deeper than the last-seen finalized block
// has been detected (the finalityViolated flag is raised elsewhere), and nil
// otherwise. The flag is cleared automatically when polling succeeds again.
func (lp *logPoller) Healthy() error {
	if !lp.finalityViolated.Load() {
		return nil
	}
	return ErrFinalityViolated
}

// RegisterFilter adds the provided EventSigs and Addresses to the log poller's log filter query.
// If any eventSig is emitted from any address, it will be captured by the log poller.
// If an event matching any of the given event signatures is emitted from any of the provided Addresses,
Expand Down Expand Up @@ -758,9 +776,11 @@ func (lp *logPoller) getCurrentBlockMaybeHandleReorg(ctx context.Context, curren
// We return an error here which will cause us to restart polling from lastBlockSaved + 1
return nil, err2
}
lp.finalityViolated.Store(false)
return blockAfterLCA, nil
}
// No reorg, return current block.
lp.finalityViolated.Store(false)
return currentBlock, nil
}

Expand Down Expand Up @@ -924,6 +944,7 @@ func (lp *logPoller) findBlockAfterLCA(ctx context.Context, current *evmtypes.He
lp.lggr.Criticalw("Reorg greater than finality depth detected", "finalityTag", lp.useFinalityTag, "current", current.Number, "latestFinalized", latestFinalizedBlockNumber)
rerr := errors.New("Reorg greater than finality depth")
lp.SvcErrBuffer.Append(rerr)
lp.finalityViolated.Store(true)
return nil, rerr
}

Expand Down
82 changes: 82 additions & 0 deletions core/chains/evm/logpoller/log_poller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -984,6 +984,86 @@ func TestLogPoller_PollAndSaveLogs(t *testing.T) {
}
}

// TestLogPoller_ReorgDeeperThanFinality verifies that the LogPoller flags a
// finality violation via Healthy() when a reorg deeper than the finalized
// block occurs, and that it recovers (Healthy() == nil) once the offending
// blocks are manually removed from the database.
func TestLogPoller_ReorgDeeperThanFinality(t *testing.T) {
	cases := []struct {
		name          string
		finalityDepth int64
		finalityTag   bool
	}{
		{
			name:          "fixed finality depth without finality tag",
			finalityDepth: 1,
			finalityTag:   false,
		},
		{
			name:          "chain finality in use",
			finalityDepth: 0,
			finalityTag:   true,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			th := SetupTH(t, tc.finalityTag, tc.finalityDepth, 3, 2, 1000)

			// Listen for Log1 events from the first emitter.
			err := th.LogPoller.RegisterFilter(logpoller.Filter{
				Name:      "Test Emitter",
				EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID},
				Addresses: []common.Address{th.EmitterAddress1},
			})
			require.NoError(t, err)

			// Build the initial chain:
			// Chain gen <- 1 <- 2 <- 3 (finalized) <- 4 (L1_1)
			_, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)})
			require.NoError(t, err)
			th.Client.Commit()
			th.Client.Commit()
			th.Client.Commit()
			markBlockAsFinalized(t, th, 3)

			// First poll picks up the L1 log; the poller should be healthy.
			firstPoll := th.PollAndSaveLogs(testutils.Context(t), 1)
			assert.Equal(t, int64(5), firstPoll)
			assert.NoError(t, th.LogPoller.Healthy())

			// Fork deeper than finality depth
			// Chain gen <- 1 <- 2 <- 3 (finalized) <- 4 (L1_1)
			//            \ 2' <- 3' <- 4' <- 5' <- 6' (finalized) <- 7' <- 8' <- 9' <- 10' (L1_2)
			lca, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(1))
			require.NoError(t, err)
			require.NoError(t, th.Client.Fork(testutils.Context(t), lca.Hash()))

			// Create 2'
			_, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(2)})
			require.NoError(t, err)
			th.Client.Commit()

			// Create 3-10
			for blockNum := 3; blockNum < 10; blockNum++ {
				_, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(blockNum))})
				require.NoError(t, err)
				th.Client.Commit()
			}
			markBlockAsFinalized(t, th, 6)

			// The reorg is deeper than finality: polling must not advance and
			// Healthy() must surface the violation.
			secondPoll := th.PollAndSaveLogs(testutils.Context(t), firstPoll)
			assert.Equal(t, firstPoll, secondPoll)
			assert.Equal(t, logpoller.ErrFinalityViolated, th.LogPoller.Healthy())

			// Manually remove latest block from the log poller to bring it back to life
			// LogPoller should be healthy again after first poll
			// Chain gen <- 1
			//            \ 2' <- 3' <- 4' <- 5' <- 6' (finalized) <- 7' <- 8' <- 9' <- 10' (L1_2)
			require.NoError(t, th.ORM.DeleteLogsAndBlocksAfter(2))
			// Poll from latest
			recoveryPoll := th.PollAndSaveLogs(testutils.Context(t), 1)
			assert.Equal(t, int64(10), recoveryPoll)
			assert.NoError(t, th.LogPoller.Healthy())
		})
	}
}

func TestLogPoller_PollAndSaveLogsDeepReorg(t *testing.T) {
t.Parallel()

Expand Down Expand Up @@ -1027,6 +1107,7 @@ func TestLogPoller_PollAndSaveLogsDeepReorg(t *testing.T) {
// Polling should get us the L1 log.
newStart := th.PollAndSaveLogs(testutils.Context(t), 1)
assert.Equal(t, int64(3), newStart)
assert.NoError(t, th.LogPoller.Healthy())
// Check that L1_1 has a proper data payload
lgs, err := th.ORM.SelectLogsByBlockRange(2, 2)
require.NoError(t, err)
Expand All @@ -1052,6 +1133,7 @@ func TestLogPoller_PollAndSaveLogsDeepReorg(t *testing.T) {

newStart = th.PollAndSaveLogs(testutils.Context(t), newStart)
assert.Equal(t, int64(10), newStart)
assert.NoError(t, th.LogPoller.Healthy())

// Expect L1_2 to be properly updated
lgs, err = th.ORM.SelectLogsByBlockRange(2, 2)
Expand Down
18 changes: 18 additions & 0 deletions core/chains/evm/logpoller/mocks/log_poller.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

0 comments on commit 9f70ea4

Please sign in to comment.